# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files.  Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.

Used to implement the fitsdiff program.
"""

import difflib
import fnmatch
import functools
import glob
import io
import operator
import os.path
import textwrap
import warnings

from collections import defaultdict
from functools import reduce
from inspect import signature
from itertools import islice

import numpy as np

from ... import __version__
from ...utils import indent
from .card import Card, BLANK_CARD
from .header import Header
from ...utils.decorators import deprecated_renamed_argument
# HDUList is used in one of the doctests
from .hdu.hdulist import fitsopen  # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from ...utils.exceptions import AstropyDeprecationWarning

__all__ = ['FITSDiff', 'HDUDiff', 'HeaderDiff', 'ImageDataDiff', 'RawDataDiff',
           'TableDataDiff']

# Column attributes of interest for comparison
_COL_ATTRS = [('unit', 'units'), ('null', 'null values'),
              ('bscale', 'bscales'), ('bzero', 'bzeros'),
              ('disp', 'display formats'), ('dim', 'dimensions')]

# Smaller default shift-width for indent:
indent = functools.partial(indent, width=2)


class _BaseDiff:
    """
    Base class for all FITS diff objects.

    When instantiating a FITS diff object, the first two arguments are always
    the two objects to diff (two FITS files, two FITS headers, etc.).
    Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that
    describe the results of the diff operation.

    The most basic attribute, present on all ``_BaseDiff`` instances, is
    ``.identical`` which is `True` if the two objects being compared are
    identical according to the diff method for objects of that type.
    """

    def __init__(self, a, b):
        """
        The ``_BaseDiff`` class does not implement a ``_diff`` method and
        should not be instantiated directly.  Instead instantiate the
        appropriate subclass of ``_BaseDiff`` for the objects being compared
        (for example, use `HeaderDiff` to compare two `Header` objects).
        """

        self.a = a
        self.b = b

        # For internal use in report output
        self._fileobj = None
        self._indent = 0

        self._diff()

    def __bool__(self):
        """
        A ``_BaseDiff`` object acts as `True` in a boolean context if the two
        objects compared are identical.  Otherwise it acts as `False`.
        """

        return not self.identical

    @classmethod
    def fromdiff(cls, other, a, b):
        """
        Returns a new Diff object of a specific subclass from an existing diff
        object, passing on the values for any arguments they share in common
        (such as ignore_keywords).

        For example::

            >>> from astropy.io import fits
            >>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
            >>> headera, headerb = fits.Header(), fits.Header()
            >>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
            >>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
            >>> list(hd.ignore_keywords)
            ['*']
        """

        sig = signature(cls.__init__)
        # The first 3 arguments of any Diff initializer are self, a, and b.
        kwargs = {}
        for arg in list(sig.parameters.keys())[3:]:
            if hasattr(other, arg):
                kwargs[arg] = getattr(other, arg)

        return cls(a, b, **kwargs)

    @property
    def identical(self):
        """
        `True` if all the ``.diff_*`` attributes on this diff instance are
        empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*`` attribute, which contains a non-empty value if and only if some difference was found between the two objects being compared. """ return not any(getattr(self, attr) for attr in self.__dict__ if attr.startswith('diff_')) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def report(self, fileobj=None, indent=0, overwrite=False): """ Generates a text report on the differences (if any) between two objects, and either returns it as a string or writes it to a file-like object. Parameters ---------- fileobj : file-like object, string, or None (optional) If `None`, this method returns the report as a string. Otherwise it returns `None` and writes the report to the given file-like object (which must have a ``.write()`` method at a minimum), or to a new file at the path specified. indent : int The number of 4 space tabs to indent the report. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. Returns ------- report : str or None """ return_string = False filepath = None if isinstance(fileobj, str): if os.path.exists(fileobj) and not overwrite: raise OSError("File {0} exists, aborting (pass in " "overwrite=True to overwrite)".format(fileobj)) else: filepath = fileobj fileobj = open(filepath, 'w') elif fileobj is None: fileobj = io.StringIO() return_string = True self._fileobj = fileobj self._indent = indent # This is used internally by _writeln try: self._report() finally: if filepath: fileobj.close() if return_string: return fileobj.getvalue() def _writeln(self, text): self._fileobj.write(indent(text, self._indent) + '\n') def _diff(self): raise NotImplementedError def _report(self): raise NotImplementedError class FITSDiff(_BaseDiff): """Diff two FITS files by filename, or two `HDUList` objects. `FITSDiff` objects have the following diff attributes: - ``diff_hdu_count``: If the FITS files being compared have different numbers of HDUs, this contains a 2-tuple of the number of HDUs in each file. - ``diff_hdus``: If any HDUs with the same index are different, this contains a list of 2-tuples of the HDU index and the `HDUDiff` object representing the differences between the two HDUs. """ def __init__(self, a, b, ignore_keywords=[], ignore_comments=[], ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True, tolerance=None): """ Parameters ---------- a : str or `HDUList` The filename of a FITS file on disk, or an `HDUList` object. b : str or `HDUList` The filename of a FITS file on disk, or an `HDUList` object to compare to the first file. ignore_keywords : sequence, optional Header keywords to ignore when comparing two headers; the presence of these keywords and their values are ignored. Wildcard strings may also be included in the list. ignore_comments : sequence, optional A list of header keywords whose comments should be ignored in the comparison. May contain wildcard strings as with ignore_keywords. ignore_fields : sequence, optional The (case-insensitive) names of any table columns to ignore if any table data is to be compared. numdiffs : int, optional The number of pixel/table values to output when reporting HDU data differences. Though the count of differences is the same either way, this allows controlling the number of different values that are kept in memory or output. 
If a negative value is given, then numdiffs is treated as unlimited (default: 10). rtol : float, optional The relative difference to allow when comparing two float values either in header values, image arrays, or table columns (default: 0.0). Values which satisfy the expression .. math:: \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right| are considered to be different. The underlying function used for comparison is `numpy.allclose`. .. versionchanged:: 2.0 ``rtol`` replaces the deprecated ``tolerance`` argument. atol : float, optional The allowed absolute difference. See also ``rtol`` parameter. .. versionadded:: 2.0 ignore_blanks : bool, optional Ignore extra whitespace at the end of string values either in headers or data. Extra leading whitespace is not ignored (default: True). ignore_blank_cards : bool, optional Ignore all cards that are blank, i.e. they only contain whitespace (default: True). """ if isinstance(a, str): try: a = fitsopen(a) except Exception as exc: raise OSError("error opening file a ({}): {}: {}".format( a, exc.__class__.__name__, exc.args[0])) close_a = True else: close_a = False if isinstance(b, str): try: b = fitsopen(b) except Exception as exc: raise OSError("error opening file b ({}): {}: {}".format( b, exc.__class__.__name__, exc.args[0])) close_b = True else: close_b = False # Normalize keywords/fields to ignore to upper case self.ignore_keywords = set(k.upper() for k in ignore_keywords) self.ignore_comments = set(k.upper() for k in ignore_comments) self.ignore_fields = set(k.upper() for k in ignore_fields) self.numdiffs = numdiffs self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.ignore_blanks = ignore_blanks self.ignore_blank_cards = ignore_blank_cards self.diff_hdu_count = () self.diff_hdus = [] try: super().__init__(a, b) finally: if close_a: a.close() if close_b: b.close() def _diff(self): if len(self.a) != len(self.b): self.diff_hdu_count = (len(self.a), len(self.b)) # For now, just compare the extensions one by one in order...might # allow some more sophisticated types of diffing later... 
# TODO: Somehow or another simplify the passing around of diff # options--this will become important as the number of options grows for idx in range(min(len(self.a), len(self.b))): hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx]) if not hdu_diff.identical: self.diff_hdus.append((idx, hdu_diff)) def _report(self): wrapper = textwrap.TextWrapper(initial_indent=' ', subsequent_indent=' ') # print out heading and parameter values filenamea = self.a.filename() if not filenamea: filenamea = '<{} object at {:#x}>'.format( self.a.__class__.__name__, id(self.a)) filenameb = self.b.filename() if not filenameb: filenameb = '<{} object at {:#x}>'.format( self.b.__class__.__name__, id(self.b)) self._fileobj.write('\n') self._writeln(' fitsdiff: {}'.format(__version__)) self._writeln(' a: {}\n b: {}'.format(filenamea, filenameb)) if self.ignore_keywords: ignore_keywords = ' '.join(sorted(self.ignore_keywords)) self._writeln(' Keyword(s) not to be compared:\n{}' .format(wrapper.fill(ignore_keywords))) if self.ignore_comments: ignore_comments = ' '.join(sorted(self.ignore_comments)) self._writeln(' Keyword(s) whose comments are not to be compared' ':\n{}'.format(wrapper.fill(ignore_comments))) if self.ignore_fields: ignore_fields = ' '.join(sorted(self.ignore_fields)) self._writeln(' Table column(s) not to be compared:\n{}' .format(wrapper.fill(ignore_fields))) self._writeln(' Maximum number of different data values to be ' 'reported: {}'.format(self.numdiffs)) self._writeln(' Relative tolerance: {}, Absolute tolerance: {}' .format(self.rtol, self.atol)) if self.diff_hdu_count: self._fileobj.write('\n') self._writeln('Files contain different numbers of HDUs:') self._writeln(' a: {}'.format(self.diff_hdu_count[0])) self._writeln(' b: {}'.format(self.diff_hdu_count[1])) if not self.diff_hdus: self._writeln('No differences found between common HDUs.') return elif not self.diff_hdus: self._fileobj.write('\n') self._writeln('No differences found.') return for idx, hdu_diff in self.diff_hdus: # print out the extension heading if idx == 0: self._fileobj.write('\n') self._writeln('Primary HDU:') else: self._fileobj.write('\n') self._writeln('Extension HDU {}:'.format(idx)) hdu_diff.report(self._fileobj, indent=self._indent + 1) class HDUDiff(_BaseDiff): """ Diff two HDU objects, including their headers and their data (but only if both HDUs contain the same type of data (image, table, or unknown). `HDUDiff` objects have the following diff attributes: - ``diff_extnames``: If the two HDUs have different EXTNAME values, this contains a 2-tuple of the different extension names. - ``diff_extvers``: If the two HDUS have different EXTVER values, this contains a 2-tuple of the different extension versions. - ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this contains a 2-tuple of the different extension levels. - ``diff_extension_types``: If the two HDUs have different XTENSION values, this contains a 2-tuple of the different extension types. - ``diff_headers``: Contains a `HeaderDiff` object for the headers of the two HDUs. This will always contain an object--it may be determined whether the headers are different through ``diff_headers.identical``. - ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or `RawDataDiff` as appropriate for the data in the HDUs, and only if the two HDUs have non-empty data of the same type (`RawDataDiff` is used for HDUs containing non-empty data of an indeterminate type). 
""" def __init__(self, a, b, ignore_keywords=[], ignore_comments=[], ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True, tolerance=None): """ See `FITSDiff` for explanations of the initialization parameters. """ self.ignore_keywords = {k.upper() for k in ignore_keywords} self.ignore_comments = {k.upper() for k in ignore_comments} self.ignore_fields = {k.upper() for k in ignore_fields} self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.numdiffs = numdiffs self.ignore_blanks = ignore_blanks self.diff_extnames = () self.diff_extvers = () self.diff_extlevels = () self.diff_extension_types = () self.diff_headers = None self.diff_data = None super().__init__(a, b) def _diff(self): if self.a.name != self.b.name: self.diff_extnames = (self.a.name, self.b.name) if self.a.ver != self.b.ver: self.diff_extvers = (self.a.ver, self.b.ver) if self.a.level != self.b.level: self.diff_extlevels = (self.a.level, self.b.level) if self.a.header.get('XTENSION') != self.b.header.get('XTENSION'): self.diff_extension_types = (self.a.header.get('XTENSION'), self.b.header.get('XTENSION')) self.diff_headers = HeaderDiff.fromdiff(self, self.a.header.copy(), self.b.header.copy()) if self.a.data is None or self.b.data is None: # TODO: Perhaps have some means of marking this case pass elif self.a.is_image and self.b.is_image: self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data) elif (isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU)): # TODO: Replace this if/when _BaseHDU grows a .is_table property self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data) elif not self.diff_extension_types: # Don't diff the data for unequal extension types that are not # recognized image or table types self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data) def _report(self): if self.identical: self._writeln(" No differences found.") if self.diff_extension_types: self._writeln(" Extension types differ:\n a: {}\n " "b: {}".format(*self.diff_extension_types)) if self.diff_extnames: self._writeln(" Extension names differ:\n a: {}\n " "b: {}".format(*self.diff_extnames)) if self.diff_extvers: self._writeln(" Extension versions differ:\n a: {}\n " "b: {}".format(*self.diff_extvers)) if self.diff_extlevels: self._writeln(" Extension levels differ:\n a: {}\n " "b: {}".format(*self.diff_extlevels)) if not self.diff_headers.identical: self._fileobj.write('\n') self._writeln(" Headers contain differences:") self.diff_headers.report(self._fileobj, indent=self._indent + 1) if self.diff_data is not None and not self.diff_data.identical: self._fileobj.write('\n') self._writeln(" Data contains differences:") self.diff_data.report(self._fileobj, indent=self._indent + 1) class HeaderDiff(_BaseDiff): """ Diff two `Header` objects. `HeaderDiff` objects have the following diff attributes: - ``diff_keyword_count``: If the two headers contain a different number of keywords, this contains a 2-tuple of the keyword count for each header. 
- ``diff_keywords``: If either header contains one or more keywords that don't appear at all in the other header, this contains a 2-tuple consisting of a list of the keywords only appearing in header a, and a list of the keywords only appearing in header b. - ``diff_duplicate_keywords``: If a keyword appears in both headers at least once, but contains a different number of duplicates (for example, a different number of HISTORY cards in each header), an item is added to this dict with the keyword as the key, and a 2-tuple of the different counts of that keyword as the value. For example:: {'HISTORY': (20, 19)} means that header a contains 20 HISTORY cards, while header b contains only 19 HISTORY cards. - ``diff_keyword_values``: If any of the common keyword between the two headers have different values, they appear in this dict. It has a structure similar to ``diff_duplicate_keywords``, with the keyword as the key, and a 2-tuple of the different values as the value. For example:: {'NAXIS': (2, 3)} means that the NAXIS keyword has a value of 2 in header a, and a value of 3 in header b. This excludes any keywords matched by the ``ignore_keywords`` list. - ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains differences between keyword comments. `HeaderDiff` objects also have a ``common_keywords`` attribute that lists all keywords that appear in both headers. """ def __init__(self, a, b, ignore_keywords=[], ignore_comments=[], rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True, tolerance=None): """ See `FITSDiff` for explanations of the initialization parameters. """ self.ignore_keywords = {k.upper() for k in ignore_keywords} self.ignore_comments = {k.upper() for k in ignore_comments} self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. 
Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.ignore_blanks = ignore_blanks self.ignore_blank_cards = ignore_blank_cards self.ignore_keyword_patterns = set() self.ignore_comment_patterns = set() for keyword in list(self.ignore_keywords): keyword = keyword.upper() if keyword != '*' and glob.has_magic(keyword): self.ignore_keywords.remove(keyword) self.ignore_keyword_patterns.add(keyword) for keyword in list(self.ignore_comments): keyword = keyword.upper() if keyword != '*' and glob.has_magic(keyword): self.ignore_comments.remove(keyword) self.ignore_comment_patterns.add(keyword) # Keywords appearing in each header self.common_keywords = [] # Set to the number of keywords in each header if the counts differ self.diff_keyword_count = () # Set if the keywords common to each header (excluding ignore_keywords) # appear in different positions within the header # TODO: Implement this self.diff_keyword_positions = () # Keywords unique to each header (excluding keywords in # ignore_keywords) self.diff_keywords = () # Keywords that have different numbers of duplicates in each header # (excluding keywords in ignore_keywords) self.diff_duplicate_keywords = {} # Keywords common to each header but having different values (excluding # keywords in ignore_keywords) self.diff_keyword_values = defaultdict(list) # Keywords common to each header but having different comments # (excluding keywords in ignore_keywords or in ignore_comments) self.diff_keyword_comments = defaultdict(list) if isinstance(a, str): a = Header.fromstring(a) if isinstance(b, str): b = Header.fromstring(b) if not (isinstance(a, Header) and isinstance(b, Header)): raise TypeError('HeaderDiff can only diff astropy.io.fits.Header ' 'objects or strings containing FITS headers.') super().__init__(a, b) # TODO: This doesn't pay much attention to the *order* of the keywords, # except in the case of duplicate keywords. The order should be checked # too, or at least it should be an option. 
def _diff(self): if self.ignore_blank_cards: cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD] cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD] else: cardsa = list(self.a.cards) cardsb = list(self.b.cards) # build dictionaries of keyword values and comments def get_header_values_comments(cards): values = {} comments = {} for card in cards: value = card.value if self.ignore_blanks and isinstance(value, str): value = value.rstrip() values.setdefault(card.keyword, []).append(value) comments.setdefault(card.keyword, []).append(card.comment) return values, comments valuesa, commentsa = get_header_values_comments(cardsa) valuesb, commentsb = get_header_values_comments(cardsb) # Normalize all keyword to upper-case for comparison's sake; # TODO: HIERARCH keywords should be handled case-sensitively I think keywordsa = {k.upper() for k in valuesa} keywordsb = {k.upper() for k in valuesb} self.common_keywords = sorted(keywordsa.intersection(keywordsb)) if len(cardsa) != len(cardsb): self.diff_keyword_count = (len(cardsa), len(cardsb)) # Any other diff attributes should exclude ignored keywords keywordsa = keywordsa.difference(self.ignore_keywords) keywordsb = keywordsb.difference(self.ignore_keywords) if self.ignore_keyword_patterns: for pattern in self.ignore_keyword_patterns: keywordsa = keywordsa.difference(fnmatch.filter(keywordsa, pattern)) keywordsb = keywordsb.difference(fnmatch.filter(keywordsb, pattern)) if '*' in self.ignore_keywords: # Any other differences between keywords are to be ignored return left_only_keywords = sorted(keywordsa.difference(keywordsb)) right_only_keywords = sorted(keywordsb.difference(keywordsa)) if left_only_keywords or right_only_keywords: self.diff_keywords = (left_only_keywords, right_only_keywords) # Compare count of each common keyword for keyword in self.common_keywords: if keyword in self.ignore_keywords: continue if self.ignore_keyword_patterns: skip = False for pattern in self.ignore_keyword_patterns: if fnmatch.fnmatch(keyword, pattern): skip = True break if skip: continue counta = len(valuesa[keyword]) countb = len(valuesb[keyword]) if counta != countb: self.diff_duplicate_keywords[keyword] = (counta, countb) # Compare keywords' values and comments for a, b in zip(valuesa[keyword], valuesb[keyword]): if diff_values(a, b, rtol=self.rtol, atol=self.atol): self.diff_keyword_values[keyword].append((a, b)) else: # If there are duplicate keywords we need to be able to # index each duplicate; if the values of a duplicate # are identical use None here self.diff_keyword_values[keyword].append(None) if not any(self.diff_keyword_values[keyword]): # No differences found; delete the array of Nones del self.diff_keyword_values[keyword] if '*' in self.ignore_comments or keyword in self.ignore_comments: continue if self.ignore_comment_patterns: skip = False for pattern in self.ignore_comment_patterns: if fnmatch.fnmatch(keyword, pattern): skip = True break if skip: continue for a, b in zip(commentsa[keyword], commentsb[keyword]): if diff_values(a, b): self.diff_keyword_comments[keyword].append((a, b)) else: self.diff_keyword_comments[keyword].append(None) if not any(self.diff_keyword_comments[keyword]): del self.diff_keyword_comments[keyword] def _report(self): if self.diff_keyword_count: self._writeln(' Headers have different number of cards:') self._writeln(' a: {}'.format(self.diff_keyword_count[0])) self._writeln(' b: {}'.format(self.diff_keyword_count[1])) if self.diff_keywords: for keyword in self.diff_keywords[0]: if keyword in 
Card._commentary_keywords: val = self.a[keyword][0] else: val = self.a[keyword] self._writeln(' Extra keyword {!r:8} in a: {!r}'.format( keyword, val)) for keyword in self.diff_keywords[1]: if keyword in Card._commentary_keywords: val = self.b[keyword][0] else: val = self.b[keyword] self._writeln(' Extra keyword {!r:8} in b: {!r}'.format( keyword, val)) if self.diff_duplicate_keywords: for keyword, count in sorted(self.diff_duplicate_keywords.items()): self._writeln(' Inconsistent duplicates of keyword {!r:8}:' .format(keyword)) self._writeln(' Occurs {} time(s) in a, {} times in (b)' .format(*count)) if self.diff_keyword_values or self.diff_keyword_comments: for keyword in self.common_keywords: report_diff_keyword_attr(self._fileobj, 'values', self.diff_keyword_values, keyword, ind=self._indent) report_diff_keyword_attr(self._fileobj, 'comments', self.diff_keyword_comments, keyword, ind=self._indent) # TODO: It might be good if there was also a threshold option for percentage of # different pixels: For example ignore if only 1% of the pixels are different # within some threshold. There are lots of possibilities here, but hold off # for now until specific cases come up. class ImageDataDiff(_BaseDiff): """ Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE extension HDU, though the data unit is assumed to be "pixels"). `ImageDataDiff` objects have the following diff attributes: - ``diff_dimensions``: If the two arrays contain either a different number of dimensions or different sizes in any dimension, this contains a 2-tuple of the shapes of each array. Currently no further comparison is performed on images that don't have the exact same dimensions. - ``diff_pixels``: If the two images contain any different pixels, this contains a list of 2-tuples of the array index where the difference was found, and another 2-tuple containing the different values. For example, if the pixel at (0, 0) contains different values this would look like:: [(0, 0), (1.1, 2.2)] where 1.1 and 2.2 are the values of that pixel in each array. This array only contains up to ``self.numdiffs`` differences, for storage efficiency. - ``diff_total``: The total number of different pixels found between the arrays. Although ``diff_pixels`` does not necessarily contain all the different pixel values, this can be used to get a count of the total number of differences found. - ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number of pixels in the arrays. """ def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0, tolerance=None): """ See `FITSDiff` for explanations of the initialization parameters. """ self.numdiffs = numdiffs self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. 
Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.diff_dimensions = () self.diff_pixels = [] self.diff_ratio = 0 # self.diff_pixels only holds up to numdiffs differing pixels, but this # self.diff_total stores the total count of differences between # the images, but not the different values self.diff_total = 0 super().__init__(a, b) def _diff(self): if self.a.shape != self.b.shape: self.diff_dimensions = (self.a.shape, self.b.shape) # Don't do any further comparison if the dimensions differ # TODO: Perhaps we could, however, diff just the intersection # between the two images return # Find the indices where the values are not equal # If neither a nor b are floating point (or complex), ignore rtol and # atol if not (np.issubdtype(self.a.dtype, np.inexact) or np.issubdtype(self.b.dtype, np.inexact)): rtol = 0 atol = 0 else: rtol = self.rtol atol = self.atol diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol) self.diff_total = len(diffs[0]) if self.diff_total == 0: # Then we're done return if self.numdiffs < 0: numdiffs = self.diff_total else: numdiffs = self.numdiffs self.diff_pixels = [(idx, (self.a[idx], self.b[idx])) for idx in islice(zip(*diffs), 0, numdiffs)] self.diff_ratio = float(self.diff_total) / float(len(self.a.flat)) def _report(self): if self.diff_dimensions: dimsa = ' x '.join(str(d) for d in reversed(self.diff_dimensions[0])) dimsb = ' x '.join(str(d) for d in reversed(self.diff_dimensions[1])) self._writeln(' Data dimensions differ:') self._writeln(' a: {}'.format(dimsa)) self._writeln(' b: {}'.format(dimsb)) # For now we don't do any further comparison if the dimensions # differ; though in the future it might be nice to be able to # compare at least where the images intersect self._writeln(' No further data comparison performed.') return if not self.diff_pixels: return for index, values in self.diff_pixels: index = [x + 1 for x in reversed(index)] self._writeln(' Data differs at {}:'.format(index)) report_diff_values(self._fileobj, values[0], values[1], ind=self._indent + 1) if self.diff_total > self.numdiffs: self._writeln(' ...') self._writeln(' {} different pixels found ({:.2%} different).' .format(self.diff_total, self.diff_ratio)) class RawDataDiff(ImageDataDiff): """ `RawDataDiff` is just a special case of `ImageDataDiff` where the images are one-dimensional, and the data is treated as a 1-dimensional array of bytes instead of pixel values. This is used to compare the data of two non-standard extension HDUs that were not recognized as containing image or table data. `ImageDataDiff` objects have the following diff attributes: - ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of `ImageDataDiff` objects. Though the "dimension" of each array is just an integer representing the number of bytes in the data. - ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff` objects, but renamed to reflect the minor semantic difference that these are raw bytes and not pixel values. Also the indices are integers instead of tuples. - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`. """ def __init__(self, a, b, numdiffs=10): """ See `FITSDiff` for explanations of the initialization parameters. 
""" self.diff_dimensions = () self.diff_bytes = [] super().__init__(a, b, numdiffs=numdiffs) def _diff(self): super()._diff() if self.diff_dimensions: self.diff_dimensions = (self.diff_dimensions[0][0], self.diff_dimensions[1][0]) self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels] del self.diff_pixels def _report(self): if self.diff_dimensions: self._writeln(' Data sizes differ:') self._writeln(' a: {} bytes'.format(self.diff_dimensions[0])) self._writeln(' b: {} bytes'.format(self.diff_dimensions[1])) # For now we don't do any further comparison if the dimensions # differ; though in the future it might be nice to be able to # compare at least where the images intersect self._writeln(' No further data comparison performed.') return if not self.diff_bytes: return for index, values in self.diff_bytes: self._writeln(' Data differs at byte {}:'.format(index)) report_diff_values(self._fileobj, values[0], values[1], ind=self._indent + 1) self._writeln(' ...') self._writeln(' {} different bytes found ({:.2%} different).' .format(self.diff_total, self.diff_ratio)) class TableDataDiff(_BaseDiff): """ Diff two table data arrays. It doesn't matter whether the data originally came from a binary or ASCII table--the data should be passed in as a recarray. `TableDataDiff` objects have the following diff attributes: - ``diff_column_count``: If the tables being compared have different numbers of columns, this contains a 2-tuple of the column count in each table. Even if the tables have different column counts, an attempt is still made to compare any columns they have in common. - ``diff_columns``: If either table contains columns unique to that table, either in name or format, this contains a 2-tuple of lists. The first element is a list of columns (these are full `Column` objects) that appear only in table a. The second element is a list of tables that appear only in table b. This only lists columns with different column definitions, and has nothing to do with the data in those columns. - ``diff_column_names``: This is like ``diff_columns``, but lists only the names of columns unique to either table, rather than the full `Column` objects. - ``diff_column_attributes``: Lists columns that are in both tables but have different secondary attributes, such as TUNIT or TDISP. The format is a list of 2-tuples: The first a tuple of the column name and the attribute, the second a tuple of the different values. - ``diff_values``: `TableDataDiff` compares the data in each table on a column-by-column basis. If any different data is found, it is added to this list. The format of this list is similar to the ``diff_pixels`` attribute on `ImageDataDiff` objects, though the "index" consists of a (column_name, row) tuple. For example:: [('TARGET', 0), ('NGC1001', 'NGC1002')] shows that the tables contain different values in the 0-th row of the 'TARGET' column. - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`. `TableDataDiff` objects also have a ``common_columns`` attribute that lists the `Column` objects for columns that are identical in both tables, and a ``common_column_names`` attribute which contains a set of the names of those columns. """ def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0, tolerance=None): """ See `FITSDiff` for explanations of the initialization parameters. 
""" self.ignore_fields = set(ignore_fields) self.numdiffs = numdiffs self.rtol = rtol self.atol = atol if tolerance is not None: # This should be removed in the next astropy version warnings.warn( '"tolerance" was deprecated in version 2.0 and will be removed in ' 'a future version. Use argument "rtol" instead.', AstropyDeprecationWarning) self.rtol = tolerance # when tolerance is provided *always* ignore `rtol` # during the transition/deprecation period self.common_columns = [] self.common_column_names = set() # self.diff_columns contains columns with different column definitions, # but not different column data. Column data is only compared in # columns that have the same definitions self.diff_rows = () self.diff_column_count = () self.diff_columns = () # If two columns have the same name+format, but other attributes are # different (such as TUNIT or such) they are listed here self.diff_column_attributes = [] # Like self.diff_columns, but just contains a list of the column names # unique to each table, and in the order they appear in the tables self.diff_column_names = () self.diff_values = [] self.diff_ratio = 0 self.diff_total = 0 super().__init__(a, b) def _diff(self): # Much of the code for comparing columns is similar to the code for # comparing headers--consider refactoring colsa = self.a.columns colsb = self.b.columns if len(colsa) != len(colsb): self.diff_column_count = (len(colsa), len(colsb)) # Even if the number of columns are unequal, we still do comparison of # any common columns colsa = {c.name.lower(): c for c in colsa} colsb = {c.name.lower(): c for c in colsb} if '*' in self.ignore_fields: # If all columns are to be ignored, ignore any further differences # between the columns return # Keep the user's original ignore_fields list for reporting purposes, # but internally use a case-insensitive version ignore_fields = {f.lower() for f in self.ignore_fields} # It might be nice if there were a cleaner way to do this, but for now # it'll do for fieldname in ignore_fields: fieldname = fieldname.lower() if fieldname in colsa: del colsa[fieldname] if fieldname in colsb: del colsb[fieldname] colsa_set = set(colsa.values()) colsb_set = set(colsb.values()) self.common_columns = sorted(colsa_set.intersection(colsb_set), key=operator.attrgetter('name')) self.common_column_names = {col.name.lower() for col in self.common_columns} left_only_columns = {col.name.lower(): col for col in colsa_set.difference(colsb_set)} right_only_columns = {col.name.lower(): col for col in colsb_set.difference(colsa_set)} if left_only_columns or right_only_columns: self.diff_columns = (left_only_columns, right_only_columns) self.diff_column_names = ([], []) if left_only_columns: for col in self.a.columns: if col.name.lower() in left_only_columns: self.diff_column_names[0].append(col.name) if right_only_columns: for col in self.b.columns: if col.name.lower() in right_only_columns: self.diff_column_names[1].append(col.name) # If the tables have a different number of rows, we don't compare the # columns right now. # TODO: It might be nice to optionally compare the first n rows where n # is the minimum of the row counts between the two tables. if len(self.a) != len(self.b): self.diff_rows = (len(self.a), len(self.b)) return # If the tables contain no rows there's no data to compare, so we're # done at this point. 
(See ticket #178) if len(self.a) == len(self.b) == 0: return # Like in the old fitsdiff, compare tables on a column by column basis # The difficulty here is that, while FITS column names are meant to be # case-insensitive, Astropy still allows, for the sake of flexibility, # two columns with the same name but different case. When columns are # accessed in FITS tables, a case-sensitive is tried first, and failing # that a case-insensitive match is made. # It's conceivable that the same column could appear in both tables # being compared, but with different case. # Though it *may* lead to inconsistencies in these rare cases, this # just assumes that there are no duplicated column names in either # table, and that the column names can be treated case-insensitively. for col in self.common_columns: name_lower = col.name.lower() if name_lower in ignore_fields: continue cola = colsa[name_lower] colb = colsb[name_lower] for attr, _ in _COL_ATTRS: vala = getattr(cola, attr, None) valb = getattr(colb, attr, None) if diff_values(vala, valb): self.diff_column_attributes.append( ((col.name.upper(), attr), (vala, valb))) arra = self.a[col.name] arrb = self.b[col.name] if (np.issubdtype(arra.dtype, np.floating) and np.issubdtype(arrb.dtype, np.floating)): diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol) elif 'P' in col.format: diffs = ([idx for idx in range(len(arra)) if not np.allclose(arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol)],) else: diffs = np.where(arra != arrb) self.diff_total += len(set(diffs[0])) if self.numdiffs >= 0: if len(self.diff_values) >= self.numdiffs: # Don't save any more diff values continue # Add no more diff'd values than this max_diffs = self.numdiffs - len(self.diff_values) else: max_diffs = len(diffs[0]) last_seen_idx = None for idx in islice(diffs[0], 0, max_diffs): if idx == last_seen_idx: # Skip duplicate indices, which my occur when the column # data contains multi-dimensional values; we're only # interested in storing row-by-row differences continue last_seen_idx = idx self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx]))) total_values = len(self.a) * len(self.a.dtype.fields) self.diff_ratio = float(self.diff_total) / float(total_values) def _report(self): if self.diff_column_count: self._writeln(' Tables have different number of columns:') self._writeln(' a: {}'.format(self.diff_column_count[0])) self._writeln(' b: {}'.format(self.diff_column_count[1])) if self.diff_column_names: # Show columns with names unique to either table for name in self.diff_column_names[0]: format = self.diff_columns[0][name.lower()].format self._writeln(' Extra column {} of format {} in a'.format( name, format)) for name in self.diff_column_names[1]: format = self.diff_columns[1][name.lower()].format self._writeln(' Extra column {} of format {} in b'.format( name, format)) col_attrs = dict(_COL_ATTRS) # Now go through each table again and show columns with common # names but other property differences... 
        for col_attr, vals in self.diff_column_attributes:
            name, attr = col_attr
            self._writeln(' Column {} has different {}:'.format(
                name, col_attrs[attr]))
            report_diff_values(self._fileobj, vals[0], vals[1],
                               ind=self._indent + 1)

        if self.diff_rows:
            self._writeln(' Table rows differ:')
            self._writeln(' a: {}'.format(self.diff_rows[0]))
            self._writeln(' b: {}'.format(self.diff_rows[1]))
            self._writeln(' No further data comparison performed.')
            return

        if not self.diff_values:
            return

        # Finally, let's go through and report column data differences:
        for indx, values in self.diff_values:
            self._writeln(' Column {} data differs in row {}:'.format(*indx))
            report_diff_values(self._fileobj, values[0], values[1],
                               ind=self._indent + 1)

        if self.diff_values and self.numdiffs < self.diff_total:
            self._writeln(' ...{} additional difference(s) found.'.format(
                (self.diff_total - self.numdiffs)))

        if self.diff_total > self.numdiffs:
            self._writeln(' ...')

        self._writeln(' {} different table data element(s) found '
                      '({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))


def diff_values(a, b, rtol=0.0, atol=0.0):
    """
    Diff two scalar values.  If both values are floats they are compared to
    within the given absolute and relative tolerance.
    """

    if isinstance(a, float) and isinstance(b, float):
        if np.isnan(a) and np.isnan(b):
            return False
        return not np.allclose(a, b, rtol=rtol, atol=atol)
    else:
        return a != b


def report_diff_values(fileobj, a, b, ind=0):
    """Write a diff between two values to the specified file-like object."""

    typea = type(a)
    typeb = type(b)

    if (isinstance(a, str) and not isinstance(b, str)):
        a = repr(a).lstrip('u')
    elif (isinstance(b, str) and not isinstance(a, str)):
        b = repr(b).lstrip('u')

    if isinstance(a, (int, float, complex, np.number)):
        a = repr(a)

    if isinstance(b, (int, float, complex, np.number)):
        b = repr(b)

    if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
        diff_indices = np.where(a != b)
        num_diffs = reduce(operator.mul, map(len, diff_indices), 1)

        for idx in islice(zip(*diff_indices), 3):
            fileobj.write(indent(' at {!r}:\n'.format(list(idx)), ind))
            report_diff_values(fileobj, a[idx], b[idx], ind=ind + 1)

        if num_diffs > 3:
            fileobj.write(indent(' ...and at {} more indices.\n'
                                 .format(num_diffs - 3), ind))
        return

    padding = max(len(typea.__name__), len(typeb.__name__)) + 3

    for line in difflib.ndiff(str(a).splitlines(), str(b).splitlines()):
        if line[0] == '-':
            line = 'a>' + line[1:]
            if typea != typeb:
                typename = '(' + typea.__name__ + ') '
                line = typename.rjust(padding) + line
        elif line[0] == '+':
            line = 'b>' + line[1:]
            if typea != typeb:
                typename = '(' + typeb.__name__ + ') '
                line = typename.rjust(padding) + line
        else:
            line = ' ' + line
            if typea != typeb:
                line = ' ' * padding + line
        fileobj.write(indent(' {}\n'.format(line.rstrip('\n')), ind))


def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
    """
    Write a diff between two header keyword values or comments to the
    specified file-like object.
    """

    if keyword in diffs:
        vals = diffs[keyword]
        for idx, val in enumerate(vals):
            if val is None:
                continue
            if idx == 0:
                dup = ''
            else:
                dup = '[{}]'.format(idx + 1)
            fileobj.write(indent(' Keyword {:8}{} has different {}:\n'
                                 .format(keyword, dup, attr), ind))
            report_diff_values(fileobj, val[0], val[1], ind=ind + 1)


def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
    """
    A version of numpy.allclose that returns the indices where the two arrays
    differ, instead of just a boolean value.
    """

    # Create fixed mask arrays to handle INF and NaN; currently INF and NaN
    # are handled as equivalent
    if not np.all(np.isfinite(a)):
        a = np.ma.fix_invalid(a).data
    if not np.all(np.isfinite(b)):
        b = np.ma.fix_invalid(b).data

    if atol == 0.0 and rtol == 0.0:
        # Use a faster comparison for the most simple (and common) case
        return np.where(a != b)
    return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))
# Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
A package for reading and writing FITS files and manipulating their contents.

A module for reading and writing Flexible Image Transport System
(FITS) files.  This file format was endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data.  For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
"""

from ... import config as _config

# Set module-global boolean variables
# TODO: Make it possible to set these variables via environment variables
# again, once support for that is added to Astropy


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.io.fits`.
    """

    enable_record_valued_keyword_cards = _config.ConfigItem(
        True,
        'If True, enable support for record-valued keywords as described by '
        'FITS WCS distortion paper. Otherwise they are treated as normal '
        'keywords.',
        aliases=['astropy.io.fits.enabled_record_valued_keyword_cards'])

    extension_name_case_sensitive = _config.ConfigItem(
        False,
        'If True, extension names (i.e. the ``EXTNAME`` keyword) should be '
        'treated as case-sensitive.')

    strip_header_whitespace = _config.ConfigItem(
        True,
        'If True, automatically remove trailing whitespace for string values in '
        'headers. Otherwise the values are returned verbatim, with all '
        'whitespace intact.')

    use_memmap = _config.ConfigItem(
        True,
        'If True, use memory-mapped file access to read/write the data in '
        'FITS files. This generally provides better performance, especially '
        'for large files, but may affect performance in I/O-heavy '
        'applications.')

    lazy_load_hdus = _config.ConfigItem(
        True,
        'If True, use lazy loading of HDUs when opening FITS files by '
        'default; that is fits.open() will only seek for and read HDUs on '
        'demand rather than reading all HDUs at once. See the documentation '
        'for fits.open() for more details.')

    enable_uint = _config.ConfigItem(
        True,
        'If True, default to recognizing the convention for representing '
        'unsigned integers in FITS--if an array has BITPIX > 0, BSCALE = 1, '
        'and BZERO = 2**BITPIX, represent the data as unsigned integers '
        'per this convention.')


conf = Conf()


# Public API compatibility imports
# These need to come after the global config variables, as some of the
# submodules use them
from . import card
from . import column
from . import convenience
from . import hdu
from .card import *
from .column import *
from .convenience import *
from .diff import *
from .fitsrec import FITS_record, FITS_rec
from .hdu import *
from .hdu.groups import GroupData
from .hdu.hdulist import fitsopen as open
from .hdu.image import Section
from .header import Header
from .verify import VerifyError

__all__ = (['Conf', 'conf'] + card.__all__ + column.__all__ +
           convenience.__all__ + hdu.__all__ +
           ['FITS_record', 'FITS_rec', 'GroupData', 'open', 'Section',
            'Header', 'VerifyError', 'conf'])
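
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original package __init__): the Conf
# items defined above are exposed as attributes of ``fits.conf`` and can be
# toggled at runtime.  'example.fits' below is only a placeholder path.
if __name__ == '__main__':
    from astropy.io import fits

    previous = fits.conf.use_memmap
    fits.conf.use_memmap = False   # read data into memory instead of memory-mapping
    try:
        with fits.open('example.fits') as hdul:
            print(len(hdul), 'HDU(s) read with memmap disabled')
    finally:
        fits.conf.use_memmap = previous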
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import copy
import operator
import re
import sys
import warnings
import weakref
import numbers

from functools import reduce
from collections import OrderedDict
from contextlib import suppress

import numpy as np
from numpy import char as chararray

from . import _numpy_hacks as nh
from .card import Card, CARD_LENGTH
from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,
                   NotifierMixin)
from .verify import VerifyError, VerifyWarning

from ...utils import lazyproperty, isiterable, indent

__all__ = ['Column', 'ColDefs', 'Delayed']


# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8',
              'E': 'f4', 'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}

# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}

# Normally booleans are represented as ints in Astropy, but if passed in a
# numpy boolean array, that should be supported
NUMPY2FITS['b1'] = 'L'

# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS['u2'] = 'I'
NUMPY2FITS['u4'] = 'J'
NUMPY2FITS['u8'] = 'K'

# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS['f2'] = 'E'

# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']

# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {'E': 'D', 'C': 'M'}

# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8',
               'D': 'f8'}

# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}

# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),
                        'E': (15, 7), 'F': (16, 7), 'D': (25, 17)}

# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS') KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim', 'coord_type', 'coord_unit', 'coord_ref_point', 'coord_ref_value', 'coord_inc', 'time_ref_pos') """This is a list of the attributes that can be set on `Column` objects.""" KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES)) ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES)) # TODO: Define a list of default comments to associate with each table keyword # TFORMn regular expression TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])' r'(?P<option>[!-~]*)', re.I) # TFORMn for ASCII tables; two different versions depending on whether # the format is floating-point or not; allows empty values for width # in which case defaults are used TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|' r'(?:(?P<formatf>[FED])' r'(?:(?P<widthf>[0-9]+)\.' r'(?P<precision>[0-9]+))?)') TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+') """ Regular expression for valid table column names. See FITS Standard v3.0 section 7.2.2. """ # table definition keyword regular expression TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') # table dimension keyword regular expression (fairly flexible with whitespace) TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+,\s*)+\s*\d+)\s*\)\s*') # value for ASCII table cell with value = TNULL # this can be reset by user. ASCIITNULL = 0 # The default placeholder to use for NULL values in ASCII tables when # converting from binary to ASCII tables DEFAULT_ASCII_TNULL = '---' class Delayed: """Delayed file-reading data.""" def __init__(self, hdu=None, field=None): self.hdu = weakref.proxy(hdu) self.field = field def __getitem__(self, key): # This forces the data for the HDU to be read, which will replace # the corresponding Delayed objects in the Tables Columns to be # transformed into ndarrays. It will also return the value of the # requested data element. return self.hdu.data[key][self.field] class _BaseColumnFormat(str): """ Base class for binary table column formats (just called _ColumnFormat) and ASCII table column formats (_AsciiColumnFormat). """ def __eq__(self, other): if not other: return False if isinstance(other, str): if not isinstance(other, self.__class__): try: other = self.__class__(other) except ValueError: return False else: return False return self.canonical == other.canonical def __hash__(self): return hash(self.canonical) @lazyproperty def dtype(self): """ The Numpy dtype object created from the format's associated recformat. """ return np.dtype(self.recformat) @classmethod def from_column_format(cls, format): """Creates a column format object from another column format object regardless of their type. That is, this can convert a _ColumnFormat to an _AsciiColumnFormat or vice versa at least in cases where a direct translation is possible. """ return cls.from_recformat(format.recformat) class _ColumnFormat(_BaseColumnFormat): """ Represents a FITS binary table column format. This is an enhancement over using a normal string for the format, since the repeat count, format code, and option are available as separate attributes, and smart comparison is used. For example 1J == J. 
""" def __new__(cls, format): self = super().__new__(cls, format) self.repeat, self.format, self.option = _parse_tformat(format) self.format = self.format.upper() if self.format in ('P', 'Q'): # TODO: There should be a generic factory that returns either # _FormatP or _FormatQ as appropriate for a given TFORMn if self.format == 'P': recformat = _FormatP.from_tform(format) else: recformat = _FormatQ.from_tform(format) # Format of variable length arrays self.p_format = recformat.format else: self.p_format = None return self @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of rTa where T is the single character data type code, a is the optional part, and r is the repeat. If repeat == 1 (the default) it is left out of this representation. """ if self.repeat == 1: repeat = '' else: repeat = str(self.repeat) return '{}{}{}'.format(repeat, self.format, self.option) class _AsciiColumnFormat(_BaseColumnFormat): """Similar to _ColumnFormat but specifically for columns in ASCII tables. The formats of ASCII table columns and binary table columns are inherently incompatible in FITS. They don't support the same ranges and types of values, and even reuse format codes in subtly different ways. For example the format code 'Iw' in ASCII columns refers to any integer whose string representation is at most w characters wide, so 'I' can represent effectively any integer that will fit in a FITS columns. Whereas for binary tables 'I' very explicitly refers to a 16-bit signed integer. Conversions between the two column formats can be performed using the ``to/from_binary`` methods on this class, or the ``to/from_ascii`` methods on the `_ColumnFormat` class. But again, not all conversions are possible and may result in a `ValueError`. """ def __new__(cls, format, strict=False): self = super().__new__(cls, format) self.format, self.width, self.precision = \ _parse_ascii_tformat(format, strict) # This is to support handling logical (boolean) data from binary tables # in an ASCII table self._pseudo_logical = False return self @classmethod def from_column_format(cls, format): inst = cls.from_recformat(format.recformat) # Hack if format.format == 'L': inst._pseudo_logical = True return inst @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_ascii_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_ascii_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of Tw.d where T is the single character data type code, w is the width in characters for this field, and d is the number of digits after the decimal place (for format codes 'E', 'F', and 'D' only). """ if self.format in ('E', 'F', 'D'): return '{}{}.{}'.format(self.format, self.width, self.precision) return '{}{}'.format(self.format, self.width) class _FormatX(str): """For X format in binary tables.""" def __new__(cls, repeat=1): nbytes = ((repeat - 1) // 8) + 1 # use an array, even if it is only ONE u1 (i.e. 
use tuple always) obj = super().__new__(cls, repr((nbytes,)) + 'u1') obj.repeat = repeat return obj def __getnewargs__(self): return (self.repeat,) @property def tform(self): return '{}X'.format(self.repeat) # TODO: Table column formats need to be verified upon first reading the file; # as it is, an invalid P format will raise a VerifyError from some deep, # unexpected place class _FormatP(str): """For P format in variable length table.""" # As far as I can tell from my reading of the FITS standard, a type code is # *required* for P and Q formats; there is no default _format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])' r'(?:\((?P<max>\d*)\))?') _format_code = 'P' _format_re = re.compile(_format_re_template.format(_format_code)) _descriptor_format = '2i4' def __new__(cls, dtype, repeat=None, max=None): obj = super().__new__(cls, cls._descriptor_format) obj.format = NUMPY2FITS[dtype] obj.dtype = dtype obj.repeat = repeat obj.max = max return obj def __getnewargs__(self): return (self.dtype, self.repeat, self.max) @classmethod def from_tform(cls, format): m = cls._format_re.match(format) if not m or m.group('dtype') not in FITS2NUMPY: raise VerifyError('Invalid column format: {}'.format(format)) repeat = m.group('repeat') array_dtype = m.group('dtype') max = m.group('max') if not max: max = None return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max) @property def tform(self): repeat = '' if self.repeat is None else self.repeat max = '' if self.max is None else self.max return '{}{}{}({})'.format(repeat, self._format_code, self.format, max) class _FormatQ(_FormatP): """Carries type description of the Q format for variable length arrays. The Q format is like the P format but uses 64-bit integers in the array descriptors, allowing for heaps stored beyond 2GB into a file. """ _format_code = 'Q' _format_re = re.compile(_FormatP._format_re_template.format(_format_code)) _descriptor_format = '2i8' class ColumnAttribute: """ Descriptor for attributes of `Column` that are associated with keywords in the FITS header and describe properties of the column as specified in the FITS standard. Each `ColumnAttribute` may have a ``validator`` method defined on it. This validates values set on this attribute to ensure that they meet the FITS standard. Invalid values will raise a warning and will not be used in formatting the column. The validator should take two arguments--the `Column` it is being assigned to, and the new value for the attribute, and it must raise an `AssertionError` if the value is invalid. The `ColumnAttribute` itself is a decorator that can be used to define the ``validator`` for each column attribute. For example:: @ColumnAttribute('TTYPE') def name(col, name): if not isinstance(name, str): raise AssertionError The actual object returned by this decorator is the `ColumnAttribute` instance though, not the ``name`` function. As such ``name`` is not a method of the class it is defined in. The setter for `ColumnAttribute` also updates the header of any table HDU this column is attached to in order to reflect the change. The ``validator`` should ensure that the value is valid for inclusion in a FITS header. """ def __init__(self, keyword): self._keyword = keyword self._validator = None # The name of the attribute associated with this keyword is currently # determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be # make more flexible in the future, for example, to support custom # column attributes. 
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword] def __get__(self, obj, objtype=None): if obj is None: return self else: return getattr(obj, self._attr) def __set__(self, obj, value): if self._validator is not None: self._validator(obj, value) old_value = getattr(obj, self._attr, None) setattr(obj, self._attr, value) obj._notify('column_attribute_changed', obj, self._attr[1:], old_value, value) def __call__(self, func): """ Set the validator for this column attribute. Returns ``self`` so that this can be used as a decorator, as described in the docs for this class. """ self._validator = func return self def __repr__(self): return "{0}('{1}')".format(self.__class__.__name__, self._keyword) class Column(NotifierMixin): """ Class which contains the definition of one column, e.g. ``ttype``, ``tform``, etc. and the array containing values for the column. """ def __init__(self, name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, array=None, ascii=None, coord_type=None, coord_unit=None, coord_ref_point=None, coord_ref_value=None, coord_inc=None, time_ref_pos=None): """ Construct a `Column` by specifying attributes. All attributes except ``format`` can be optional; see :ref:`column_creation` and :ref:`creating_ascii_table` for more information regarding ``TFORM`` keyword. Parameters ---------- name : str, optional column name, corresponding to ``TTYPE`` keyword format : str column format, corresponding to ``TFORM`` keyword unit : str, optional column unit, corresponding to ``TUNIT`` keyword null : str, optional null value, corresponding to ``TNULL`` keyword bscale : int-like, optional bscale value, corresponding to ``TSCAL`` keyword bzero : int-like, optional bzero value, corresponding to ``TZERO`` keyword disp : str, optional display format, corresponding to ``TDISP`` keyword start : int, optional column starting position (ASCII table only), corresponding to ``TBCOL`` keyword dim : str, optional column dimension corresponding to ``TDIM`` keyword array : iterable, optional a `list`, `numpy.ndarray` (or other iterable that can be used to initialize an ndarray) providing initial data for this column. The array will be automatically converted, if possible, to the data format of the column. In the case were non-trivial ``bscale`` and/or ``bzero`` arguments are given, the values in the array must be the *physical* values--that is, the values of column as if the scaling has already been applied (the array stored on the column object will then be converted back to its storage values). 
ascii : bool, optional set `True` if this describes a column for an ASCII table; this may be required to disambiguate the column format coord_type : str, optional coordinate/axis type corresponding to ``TCTYP`` keyword coord_unit : str, optional coordinate/axis unit corresponding to ``TCUNI`` keyword coord_ref_point : int-like, optional pixel coordinate of the reference point corresponding to ``TCRPX`` keyword coord_ref_value : int-like, optional coordinate value at reference point corresponding to ``TCRVL`` keyword coord_inc : int-like, optional coordinate increment at reference point corresponding to ``TCDLT`` keyword time_ref_pos : str, optional reference position for a time coordinate column corresponding to ``TRPOS`` keyword """ if format is None: raise ValueError('Must specify format to construct Column.') # any of the input argument (except array) can be a Card or just # a number/string kwargs = {'ascii': ascii} for attr in KEYWORD_ATTRIBUTES: value = locals()[attr] # get the argument's value if isinstance(value, Card): value = value.value kwargs[attr] = value valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs) if invalid_kwargs: msg = ['The following keyword arguments to Column were invalid:'] for val in invalid_kwargs.values(): msg.append(indent(val[1])) raise VerifyError('\n'.join(msg)) for attr in KEYWORD_ATTRIBUTES: setattr(self, attr, valid_kwargs.get(attr)) # TODO: Try to eliminate the following two special cases # for recformat and dim: # This is not actually stored as an attribute on columns for some # reason recformat = valid_kwargs['recformat'] # The 'dim' keyword's original value is stored in self.dim, while # *only* the tuple form is stored in self._dims. self._dims = self.dim self.dim = dim # Awful hack to use for now to keep track of whether the column holds # pseudo-unsigned int data self._pseudo_unsigned_ints = False # if the column data is not ndarray, make it to be one, i.e. # input arrays can be just list or tuple, not required to be ndarray # does not include Object array because there is no guarantee # the elements in the object array are consistent. if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)): try: # try to convert to a ndarray first if array is not None: array = np.array(array) except Exception: try: # then try to convert it to a strings array itemsize = int(recformat[1:]) array = chararray.array(array, itemsize=itemsize) except ValueError: # then try variable length array # Note: This includes _FormatQ by inheritance if isinstance(recformat, _FormatP): array = _VLF(array, dtype=recformat.dtype) else: raise ValueError('Data is inconsistent with the ' 'format `{}`.'.format(format)) array = self._convert_to_valid_data_type(array) # We have required (through documentation) that arrays passed in to # this constructor are already in their physical values, so we make # note of that here if isinstance(array, np.ndarray): self._physical_values = True else: self._physical_values = False self._parent_fits_rec = None self.array = array def __repr__(self): text = '' for attr in KEYWORD_ATTRIBUTES: value = getattr(self, attr) if value is not None: text += attr + ' = ' + repr(value) + '; ' return text[:-2] def __eq__(self, other): """ Two columns are equal if their name and format are the same. Other attributes aren't taken into account at this time. 
""" # According to the FITS standard column names must be case-insensitive a = (self.name.lower(), self.format) b = (other.name.lower(), other.format) return a == b def __hash__(self): """ Like __eq__, the hash of a column should be based on the unique column name and format, and be case-insensitive with respect to the column name. """ return hash((self.name.lower(), self.format)) @property def array(self): """ The Numpy `~numpy.ndarray` associated with this `Column`. If the column was instantiated with an array passed to the ``array`` argument, this will return that array. However, if the column is later added to a table, such as via `BinTableHDU.from_columns` as is typically the case, this attribute will be updated to reference the associated field in the table, which may no longer be the same array. """ # Ideally the .array attribute never would have existed in the first # place, or would have been internal-only. This is a legacy of the # older design from Astropy that needs to have continued support, for # now. # One of the main problems with this design was that it created a # reference cycle. When the .array attribute was updated after # creating a FITS_rec from the column (as explained in the docstring) a # reference cycle was created. This is because the code in BinTableHDU # (and a few other places) does essentially the following: # # data._coldefs = columns # The ColDefs object holding this Column # for col in columns: # col.array = data.field(col.name) # # This way each columns .array attribute now points to the field in the # table data. It's actually a pretty confusing interface (since it # replaces the array originally pointed to by .array), but it's the way # things have been for a long, long time. # # However, this results, in *many* cases, in a reference cycle. # Because the array returned by data.field(col.name), while sometimes # an array that owns its own data, is usually like a slice of the # original data. It has the original FITS_rec as the array .base. # This results in the following reference cycle (for the n-th column): # # data -> data._coldefs -> data._coldefs[n] -> # data._coldefs[n].array -> data._coldefs[n].array.base -> data # # Because ndarray objects do not handled by Python's garbage collector # the reference cycle cannot be broken. Therefore the FITS_rec's # refcount never goes to zero, its __del__ is never called, and its # memory is never freed. This didn't occur in *all* cases, but it did # occur in many cases. # # To get around this, Column.array is no longer a simple attribute # like it was previously. Now each Column has a ._parent_fits_rec # attribute which is a weakref to a FITS_rec object. Code that # previously assigned each col.array to field in a FITS_rec (as in # the example a few paragraphs above) is still used, however now # array.setter checks if a reference cycle will be created. And if # so, instead of saving directly to the Column's __dict__, it creates # the ._prent_fits_rec weakref, and all lookups of the column's .array # go through that instead. # # This alone does not fully solve the problem. Because # _parent_fits_rec is a weakref, if the user ever holds a reference to # the Column, but deletes all references to the underlying FITS_rec, # the .array attribute would suddenly start returning None instead of # the array data. This problem is resolved on FITS_rec's end. See the # note in the FITS_rec._coldefs property for the rest of the story. 
# If the Columns's array is not a reference to an existing FITS_rec, # then it is just stored in self.__dict__; otherwise check the # _parent_fits_rec reference if it 's still available. if 'array' in self.__dict__: return self.__dict__['array'] elif self._parent_fits_rec is not None: parent = self._parent_fits_rec() if parent is not None: return parent[self.name] else: return None @array.setter def array(self, array): # The following looks over the bases of the given array to check if it # has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs # contains this Column itself, and would create a reference cycle if we # stored the array directly in self.__dict__. # In this case it instead sets up the _parent_fits_rec weakref to the # underlying FITS_rec, so that array.getter can return arrays through # self._parent_fits_rec().field(self.name), rather than storing a # hard reference to the field like it used to. base = array while True: if (hasattr(base, '_coldefs') and isinstance(base._coldefs, ColDefs)): for col in base._coldefs: if col is self and self._parent_fits_rec is None: self._parent_fits_rec = weakref.ref(base) # Just in case the user already set .array to their own # array. if 'array' in self.__dict__: del self.__dict__['array'] return if getattr(base, 'base', None) is not None: base = base.base else: break self.__dict__['array'] = array @array.deleter def array(self): try: del self.__dict__['array'] except KeyError: pass self._parent_fits_rec = None @ColumnAttribute('TTYPE') def name(col, name): if name is None: # Allow None to indicate deleting the name, or to just indicate an # unspecified name (when creating a new Column). return # Check that the name meets the recommended standard--other column # names are *allowed*, but will be discouraged if isinstance(name, str) and not TTYPE_RE.match(name): warnings.warn( 'It is strongly recommended that column names contain only ' 'upper and lower-case ASCII letters, digits, or underscores ' 'for maximum compatibility with other software ' '(got {0!r}).'.format(name), VerifyWarning) # This ensures that the new name can fit into a single FITS card # without any special extension like CONTINUE cards or the like. 
if (not isinstance(name, str) or len(str(Card('TTYPE', name))) != CARD_LENGTH): raise AssertionError( 'Column name must be a string able to fit in a single ' 'FITS card--typically this means a maximum of 68 ' 'characters, though it may be fewer if the string ' 'contains special characters like quotes.') @ColumnAttribute('TCTYP') def coord_type(col, coord_type): if coord_type is None: return if (not isinstance(coord_type, str) or len(coord_type) > 8): raise AssertionError( 'Coordinate/axis type must be a string of atmost 8 ' 'characters.') @ColumnAttribute('TCUNI') def coord_unit(col, coord_unit): if (coord_unit is not None and not isinstance(coord_unit, str)): raise AssertionError( 'Coordinate/axis unit must be a string.') @ColumnAttribute('TCRPX') def coord_ref_point(col, coord_ref_point): if (coord_ref_point is not None and not isinstance(coord_ref_point, numbers.Real)): raise AssertionError( 'Pixel coordinate of the reference point must be ' 'real floating type.') @ColumnAttribute('TCRVL') def coord_ref_value(col, coord_ref_value): if (coord_ref_value is not None and not isinstance(coord_ref_value, numbers.Real)): raise AssertionError( 'Coordinate value at reference point must be real ' 'floating type.') @ColumnAttribute('TCDLT') def coord_inc(col, coord_inc): if (coord_inc is not None and not isinstance(coord_inc, numbers.Real)): raise AssertionError( 'Coordinate increment must be real floating type.') @ColumnAttribute('TRPOS') def time_ref_pos(col, time_ref_pos): if (time_ref_pos is not None and not isinstance(time_ref_pos, str)): raise AssertionError( 'Time reference position must be a string.') format = ColumnAttribute('TFORM') unit = ColumnAttribute('TUNIT') null = ColumnAttribute('TNULL') bscale = ColumnAttribute('TSCAL') bzero = ColumnAttribute('TZERO') disp = ColumnAttribute('TDISP') start = ColumnAttribute('TBCOL') dim = ColumnAttribute('TDIM') @lazyproperty def ascii(self): """Whether this `Column` represents a column in an ASCII table.""" return isinstance(self.format, _AsciiColumnFormat) @lazyproperty def dtype(self): return self.format.dtype def copy(self): """ Return a copy of this `Column`. """ tmp = Column(format='I') # just use a throw-away format tmp.__dict__ = self.__dict__.copy() return tmp @staticmethod def _convert_format(format, cls): """The format argument to this class's initializer may come in many forms. This uses the given column format class ``cls`` to convert to a format of that type. TODO: There should be an abc base class for column format classes """ # Short circuit in case we're already a _BaseColumnFormat--there is at # least one case in which this can happen if isinstance(format, _BaseColumnFormat): return format, format.recformat if format in NUMPY2FITS: with suppress(VerifyError): # legit recarray format? recformat = format format = cls.from_recformat(format) try: # legit FITS format? format = cls(format) recformat = format.recformat except VerifyError: raise VerifyError('Illegal format `{}`.'.format(format)) return format, recformat @classmethod def _verify_keywords(cls, name=None, format=None, unit=None, null=None, bscale=None, bzero=None, disp=None, start=None, dim=None, ascii=None, coord_type=None, coord_unit=None, coord_ref_point=None, coord_ref_value=None, coord_inc=None, time_ref_pos=None): """ Given the keyword arguments used to initialize a Column, specifically those that typically read from a FITS header (so excluding array), verify that each keyword has a valid value. Returns a 2-tuple of dicts. 
The first maps valid keywords to their values. The second maps invalid keywords to a 2-tuple of their value, and a message explaining why they were found invalid. """ valid = {} invalid = {} format, recformat = cls._determine_formats(format, start, dim, ascii) valid.update(format=format, recformat=recformat) # Currently we don't have any validation for name, unit, bscale, or # bzero so include those by default # TODO: Add validation for these keywords, obviously for k, v in [('name', name), ('unit', unit), ('bscale', bscale), ('bzero', bzero)]: if v is not None and v != '': valid[k] = v # Validate null option # Note: Enough code exists that thinks empty strings are sensible # inputs for these options that we need to treat '' as None if null is not None and null != '': msg = None if isinstance(format, _AsciiColumnFormat): null = str(null) if len(null) > format.width: msg = ( "ASCII table null option (TNULLn) is longer than " "the column's character width and will be truncated " "(got {!r}).".format(null)) else: tnull_formats = ('B', 'I', 'J', 'K') if not _is_int(null): # Make this an exception instead of a warning, since any # non-int value is meaningless msg = ( 'Column null option (TNULLn) must be an integer for ' 'binary table columns (got {!r}). The invalid value ' 'will be ignored for the purpose of formatting ' 'the data in this column.'.format(null)) elif not (format.format in tnull_formats or (format.format in ('P', 'Q') and format.p_format in tnull_formats)): # TODO: We should also check that TNULLn's integer value # is in the range allowed by the column's format msg = ( 'Column null option (TNULLn) is invalid for binary ' 'table columns of type {!r} (got {!r}). The invalid ' 'value will be ignored for the purpose of formatting ' 'the data in this column.'.format(format, null)) if msg is None: valid['null'] = null else: invalid['null'] = (null, msg) # Validate the disp option # TODO: Add full parsing and validation of TDISPn keywords if disp is not None and disp != '': msg = None if not isinstance(disp, str): msg = ( 'Column disp option (TDISPn) must be a string (got {!r}).' 'The invalid value will be ignored for the purpose of ' 'formatting the data in this column.'.format(disp)) elif (isinstance(format, _AsciiColumnFormat) and disp[0].upper() == 'L'): # disp is at least one character long and has the 'L' format # which is not recognized for ASCII tables msg = ( "Column disp option (TDISPn) may not use the 'L' format " "with ASCII table columns. The invalid value will be " "ignored for the purpose of formatting the data in this " "column.") if msg is None: valid['disp'] = disp else: invalid['disp'] = (disp, msg) # Validate the start option if start is not None and start != '': msg = None if not isinstance(format, _AsciiColumnFormat): # The 'start' option only applies to ASCII columns msg = ( 'Column start option (TBCOLn) is not allowed for binary ' 'table columns (got {!r}). The invalid keyword will be ' 'ignored for the purpose of formatting the data in this ' 'column.'.format(start)) else: try: start = int(start) except (TypeError, ValueError): pass if not _is_int(start) or start < 1: msg = ( 'Column start option (TBCOLn) must be a positive integer ' '(got {!r}). 
The invalid value will be ignored for the ' 'purpose of formatting the data in this column.'.format(start)) if msg is None: valid['start'] = start else: invalid['start'] = (start, msg) # Process TDIMn options # ASCII table columns can't have a TDIMn keyword associated with it; # for now we just issue a warning and ignore it. # TODO: This should be checked by the FITS verification code if dim is not None and dim != '': msg = None dims_tuple = tuple() # NOTE: If valid, the dim keyword's value in the the valid dict is # a tuple, not the original string; if invalid just the original # string is returned if isinstance(format, _AsciiColumnFormat): msg = ( 'Column dim option (TDIMn) is not allowed for ASCII table ' 'columns (got {!r}). The invalid keyword will be ignored ' 'for the purpose of formatting this column.'.format(dim)) elif isinstance(dim, str): dims_tuple = _parse_tdim(dim) elif isinstance(dim, tuple): dims_tuple = dim else: msg = ( "`dim` argument must be a string containing a valid value " "for the TDIMn header keyword associated with this column, " "or a tuple containing the C-order dimensions for the " "column. The invalid value will be ignored for the purpose " "of formatting this column.") if dims_tuple: if reduce(operator.mul, dims_tuple) > format.repeat: msg = ( "The repeat count of the column format {!r} for column {!r} " "is fewer than the number of elements per the TDIM " "argument {!r}. The invalid TDIMn value will be ignored " "for the purpose of formatting this column.".format( name, format, dim)) if msg is None: valid['dim'] = dims_tuple else: invalid['dim'] = (dim, msg) if coord_type is not None and coord_type != '': msg = None if not isinstance(coord_type, str): msg = ( "Coordinate/axis type option (TCTYPn) must be a string " "(got {!r}). The invalid keyword will be ignored for the " "purpose of formatting this column.".format(coord_type)) elif len(coord_type) > 8: msg = ( "Coordinate/axis type option (TCTYPn) must be a string " "of atmost 8 characters (got {!r}). The invalid keyword " "will be ignored for the purpose of formatting this " "column.".format(coord_type)) if msg is None: valid['coord_type'] = coord_type else: invalid['coord_type'] = (coord_type, msg) if coord_unit is not None and coord_unit != '': msg = None if not isinstance(coord_unit, str): msg = ( "Coordinate/axis unit option (TCUNIn) must be a string " "(got {!r}). The invalid keyword will be ignored for the " "purpose of formatting this column.".format(coord_unit)) if msg is None: valid['coord_unit'] = coord_unit else: invalid['coord_unit'] = (coord_unit, msg) for k, v in [('coord_ref_point', coord_ref_point), ('coord_ref_value', coord_ref_value), ('coord_inc', coord_inc)]: if v is not None and v != '': msg = None if not isinstance(v, numbers.Real): msg = ( "Column {} option ({}n) must be a real floating type (got {!r}). " "The invalid value will be ignored for the purpose of formatting " "the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v)) if msg is None: valid[k] = v else: invalid[k] = (v, msg) if time_ref_pos is not None and time_ref_pos != '': msg=None if not isinstance(time_ref_pos, str): msg = ( "Time coordinate reference position option (TRPOSn) must be " "a string (got {!r}). 
The invalid keyword will be ignored for " "the purpose of formatting this column.".format(time_ref_pos)) if msg is None: valid['time_ref_pos'] = time_ref_pos else: invalid['time_ref_pos'] = (time_ref_pos, msg) return valid, invalid @classmethod def _determine_formats(cls, format, start, dim, ascii): """ Given a format string and whether or not the Column is for an ASCII table (ascii=None means unspecified, but lean toward binary table where ambiguous) create an appropriate _BaseColumnFormat instance for the column's format, and determine the appropriate recarray format. The values of the start and dim keyword arguments are also useful, as the former is only valid for ASCII tables and the latter only for BINARY tables. """ # If the given format string is unambiguously a Numpy dtype or one of # the Numpy record format type specifiers supported by Astropy then that # should take priority--otherwise assume it is a FITS format if isinstance(format, np.dtype): format, _, _ = _dtype_to_recformat(format) # check format if ascii is None and not isinstance(format, _BaseColumnFormat): # We're just give a string which could be either a Numpy format # code, or a format for a binary column array *or* a format for an # ASCII column array--there may be many ambiguities here. Try our # best to guess what the user intended. format, recformat = cls._guess_format(format, start, dim) elif not ascii and not isinstance(format, _BaseColumnFormat): format, recformat = cls._convert_format(format, _ColumnFormat) elif ascii and not isinstance(format, _AsciiColumnFormat): format, recformat = cls._convert_format(format, _AsciiColumnFormat) else: # The format is already acceptable and unambiguous recformat = format.recformat return format, recformat @classmethod def _guess_format(cls, format, start, dim): if start and dim: # This is impossible; this can't be a valid FITS column raise ValueError( 'Columns cannot have both a start (TCOLn) and dim ' '(TDIMn) option, since the former is only applies to ' 'ASCII tables, and the latter is only valid for binary ' 'tables.') elif start: # Only ASCII table columns can have a 'start' option guess_format = _AsciiColumnFormat elif dim: # Only binary tables can have a dim option guess_format = _ColumnFormat else: # If the format is *technically* a valid binary column format # (i.e. it has a valid format code followed by arbitrary # "optional" codes), but it is also strictly a valid ASCII # table format, then assume an ASCII table column was being # requested (the more likely case, after all). 
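            # A concrete illustration (example formats only, not from the
            # original source): 'I10' is technically parseable as a binary
            # format (code 'I' with option '10') but is also a strict ASCII
            # format (a 10-character integer field), so it ends up treated as
            # an ASCII column; '10I' or a bare 'I' fail the strict ASCII parse
            # and fall through to the binary guess below.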
with suppress(VerifyError): format = _AsciiColumnFormat(format, strict=True) # A safe guess which reflects the existing behavior of previous # Astropy versions guess_format = _ColumnFormat try: format, recformat = cls._convert_format(format, guess_format) except VerifyError: # For whatever reason our guess was wrong (for example if we got # just 'F' that's not a valid binary format, but it an ASCII format # code albeit with the width/precision omitted guess_format = (_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat) # If this fails too we're out of options--it is truly an invalid # format, or at least not supported format, recformat = cls._convert_format(format, guess_format) return format, recformat def _convert_to_valid_data_type(self, array): # Convert the format to a type we understand if isinstance(array, Delayed): return array elif array is None: return array else: format = self.format dims = self._dims if dims: shape = dims[:-1] if 'A' in format else dims shape = (len(array),) + shape array = array.reshape(shape) if 'P' in format or 'Q' in format: return array elif 'A' in format: if array.dtype.char in 'SU': if dims: # The 'last' dimension (first in the order given # in the TDIMn keyword itself) is the number of # characters in each string fsize = dims[-1] else: fsize = np.dtype(format.recformat).itemsize return chararray.array(array, itemsize=fsize, copy=False) else: return _convert_array(array, np.dtype(format.recformat)) elif 'L' in format: # boolean needs to be scaled back to storage values ('T', 'F') if array.dtype == np.dtype('bool'): return np.where(array == np.False_, ord('F'), ord('T')) else: return np.where(array == 0, ord('F'), ord('T')) elif 'X' in format: return _convert_array(array, np.dtype('uint8')) else: # Preserve byte order of the original array for now; see #77 numpy_format = array.dtype.byteorder + format.recformat # Handle arrays passed in as unsigned ints as pseudo-unsigned # int arrays; blatantly tacked in here for now--we need columns # to have explicit knowledge of whether they treated as # pseudo-unsigned bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31), 8: np.uint64(2**63)} if (array.dtype.kind == 'u' and array.dtype.itemsize in bzeros and self.bscale in (1, None, '') and self.bzero == bzeros[array.dtype.itemsize]): # Basically the array is uint, has scale == 1.0, and the # bzero is the appropriate value for a pseudo-unsigned # integer of the input dtype, then go ahead and assume that # uint is assumed numpy_format = numpy_format.replace('i', 'u') self._pseudo_unsigned_ints = True # The .base here means we're dropping the shape information, # which is only used to format recarray fields, and is not # useful for converting input arrays to the correct data type dtype = np.dtype(numpy_format).base return _convert_array(array, dtype) class ColDefs(NotifierMixin): """ Column definitions class. It has attributes corresponding to the `Column` attributes (e.g. `ColDefs` has the attribute ``names`` while `Column` has ``name``). Each attribute in `ColDefs` is a list of corresponding attribute values from all `Column` objects. 
""" _padding_byte = '\x00' _col_format_cls = _ColumnFormat def __new__(cls, input, ascii=False): klass = cls if (hasattr(input, '_columns_type') and issubclass(input._columns_type, ColDefs)): klass = input._columns_type elif (hasattr(input, '_col_format_cls') and issubclass(input._col_format_cls, _AsciiColumnFormat)): klass = _AsciiColDefs if ascii: # force ASCII if this has been explicitly requested klass = _AsciiColDefs return object.__new__(klass) def __getnewargs__(self): return (self._arrays,) def __init__(self, input, ascii=False): """ Parameters ---------- input : sequence of `Column`, `ColDefs`, other An existing table HDU, an existing `ColDefs`, or any multi-field Numpy array or `numpy.recarray`. ascii : bool Use True to ensure that ASCII table columns are used. """ from .hdu.table import _TableBaseHDU from .fitsrec import FITS_rec if isinstance(input, ColDefs): self._init_from_coldefs(input) elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and input._coldefs): # If given a FITS_rec object we can directly copy its columns, but # only if its columns have already been defined, otherwise this # will loop back in on itself and blow up self._init_from_coldefs(input._coldefs) elif isinstance(input, np.ndarray) and input.dtype.fields is not None: # Construct columns from the fields of a record array self._init_from_array(input) elif isiterable(input): # if the input is a list of Columns self._init_from_sequence(input) elif isinstance(input, _TableBaseHDU): # Construct columns from fields in an HDU header self._init_from_table(input) else: raise TypeError('Input to ColDefs must be a table HDU, a list ' 'of Columns, or a record/field array.') # Listen for changes on all columns for col in self.columns: col._add_listener(self) def _init_from_coldefs(self, coldefs): """Initialize from an existing ColDefs object (just copy the columns and convert their formats if necessary). """ self.columns = [self._copy_column(col) for col in coldefs] def _init_from_sequence(self, columns): for idx, col in enumerate(columns): if not isinstance(col, Column): raise TypeError('Element {} in the ColDefs input is not a ' 'Column.'.format(idx)) self._init_from_coldefs(columns) def _init_from_array(self, array): self.columns = [] for idx in range(len(array.dtype)): cname = array.dtype.names[idx] ftype = array.dtype.fields[cname][0] format = self._col_format_cls.from_recformat(ftype) # Determine the appropriate dimensions for items in the column # (typically just 1D) dim = array.dtype[idx].shape[::-1] if dim and (len(dim) > 1 or 'A' in format): if 'A' in format: # n x m string arrays must include the max string # length in their dimensions (e.g. l x n x m) dim = (array.dtype[idx].base.itemsize,) + dim dim = repr(dim).replace(' ', '') else: dim = None # Check for unsigned ints. 
bzero = None if 'I' in format and ftype == np.dtype('uint16'): bzero = np.uint16(2**15) elif 'J' in format and ftype == np.dtype('uint32'): bzero = np.uint32(2**31) elif 'K' in format and ftype == np.dtype('uint64'): bzero = np.uint64(2**63) c = Column(name=cname, format=format, array=array.view(np.ndarray)[cname], bzero=bzero, dim=dim) self.columns.append(c) def _init_from_table(self, table): hdr = table._header nfields = hdr['TFIELDS'] # go through header keywords to pick out column definition keywords # definition dictionaries for each field col_keywords = [{} for i in range(nfields)] for keyword, value in hdr.items(): key = TDEF_RE.match(keyword) try: keyword = key.group('label') except Exception: continue # skip if there is no match if keyword in KEYWORD_NAMES: col = int(key.group('num')) if 0 < col <= nfields: attr = KEYWORD_TO_ATTRIBUTE[keyword] if attr == 'format': # Go ahead and convert the format value to the # appropriate ColumnFormat container now value = self._col_format_cls(value) col_keywords[col - 1][attr] = value # Verify the column keywords and display any warnings if necessary; # we only want to pass on the valid keywords for idx, kwargs in enumerate(col_keywords): valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs) for val in invalid_kwargs.values(): warnings.warn( 'Invalid keyword for column {}: {}'.format(idx + 1, val[1]), VerifyWarning) # Special cases for recformat and dim # TODO: Try to eliminate the need for these special cases del valid_kwargs['recformat'] if 'dim' in valid_kwargs: valid_kwargs['dim'] = kwargs['dim'] col_keywords[idx] = valid_kwargs # data reading will be delayed for col in range(nfields): col_keywords[col]['array'] = Delayed(table, col) # now build the columns self.columns = [Column(**attrs) for attrs in col_keywords] # Add the table HDU is a listener to changes to the columns # (either changes to individual columns, or changes to the set of # columns (add/remove/etc.)) self._add_listener(table) def __copy__(self): return self.__class__(self) def __deepcopy__(self, memo): return self.__class__([copy.deepcopy(c, memo) for c in self.columns]) def _copy_column(self, column): """Utility function used currently only by _init_from_coldefs to help convert columns from binary format to ASCII format or vice versa if necessary (otherwise performs a straight copy). """ if isinstance(column.format, self._col_format_cls): # This column has a FITS format compatible with this column # definitions class (that is ascii or binary) return column.copy() new_column = column.copy() # Try to use the Numpy recformat as the equivalency between the # two formats; if that conversion can't be made then these # columns can't be transferred # TODO: Catch exceptions here and raise an explicit error about # column format conversion new_column.format = self._col_format_cls.from_column_format( column.format) # Handle a few special cases of column format options that are not # compatible between ASCII an binary tables # TODO: This is sort of hacked in right now; we really need # separate classes for ASCII and Binary table Columns, and they # should handle formatting issues like these if not isinstance(new_column.format, _AsciiColumnFormat): # the column is a binary table column... new_column.start = None if new_column.null is not None: # We can't just "guess" a value to represent null # values in the new column, so just disable this for # now; users may modify it later new_column.null = None else: # the column is an ASCII table column... 
if new_column.null is not None: new_column.null = DEFAULT_ASCII_TNULL if (new_column.disp is not None and new_column.disp.upper().startswith('L')): # ASCII columns may not use the logical data display format; # for now just drop the TDISPn option for this column as we # don't have a systematic conversion of boolean data to ASCII # tables yet new_column.disp = None return new_column def __getattr__(self, name): """ Automatically returns the values for the given keyword attribute for all `Column`s in this list. Implements for example self.units, self.formats, etc. """ cname = name[:-1] if cname in KEYWORD_ATTRIBUTES and name[-1] == 's': attr = [] for col in self.columns: val = getattr(col, cname) attr.append(val if val is not None else '') return attr raise AttributeError(name) @lazyproperty def dtype(self): # Note: This previously returned a dtype that just used the raw field # widths based on the format's repeat count, and did not incorporate # field *shapes* as provided by TDIMn keywords. # Now this incorporates TDIMn from the start, which makes *this* method # a little more complicated, but simplifies code elsewhere (for example # fields will have the correct shapes even in the raw recarray). fields = [] offsets = [0] for name, format_, dim in zip(self.names, self.formats, self._dims): dt = format_.dtype if len(offsets) < len(self.formats): # Note: the size of the *original* format_ may be greater than # one would expect from the number of elements determined by # dim. The FITS format allows this--the rest of the field is # filled with undefined values. offsets.append(offsets[-1] + dt.itemsize) if dim: if format_.format == 'A': dt = np.dtype((dt.char + str(dim[-1]), dim[:-1])) else: dt = np.dtype((dt.base, dim)) fields.append((name, dt)) return nh.realign_dtype(np.dtype(fields), offsets) @lazyproperty def names(self): return [col.name for col in self.columns] @lazyproperty def formats(self): return [col.format for col in self.columns] @lazyproperty def _arrays(self): return [col.array for col in self.columns] @lazyproperty def _recformats(self): return [fmt.recformat for fmt in self.formats] @lazyproperty def _dims(self): """Returns the values of the TDIMn keywords parsed into tuples.""" return [col._dims for col in self.columns] def __getitem__(self, key): if isinstance(key, str): key = _get_index(self.names, key) x = self.columns[key] if _is_int(key): return x else: return ColDefs(x) def __len__(self): return len(self.columns) def __repr__(self): rep = 'ColDefs(' if hasattr(self, 'columns') and self.columns: # The hasattr check is mostly just useful in debugging sessions # where self.columns may not be defined yet rep += '\n ' rep += '\n '.join([repr(c) for c in self.columns]) rep += '\n' rep += ')' return rep def __add__(self, other, option='left'): if isinstance(other, Column): b = [other] elif isinstance(other, ColDefs): b = list(other.columns) else: raise TypeError('Wrong type of input.') if option == 'left': tmp = list(self.columns) + b else: tmp = b + list(self.columns) return ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if not isinstance(other, (list, tuple)): other = [other] _other = [_get_index(self.names, key) for key in other] indx = list(range(len(self))) for x in _other: indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp) def _update_column_attribute_changed(self, column, attr, old_value, new_value): """ Handle column attribute changed notifications from columns that are members of this `ColDefs`. 
`ColDefs` itself does not currently do anything with this, and just bubbles the notification up to any listening table HDUs that may need to update their headers, etc. However, this also informs the table of the numerical index of the column that changed. """ idx = 0 for idx, col in enumerate(self.columns): if col is column: break if attr == 'name': del self.names elif attr == 'format': del self.formats self._notify('column_attribute_changed', column, idx, attr, old_value, new_value) def add_col(self, column): """ Append one `Column` to the column definition. """ if not isinstance(column, Column): raise AssertionError self._arrays.append(column.array) # Obliterate caches of certain things del self.dtype del self._recformats del self._dims del self.names del self.formats self.columns.append(column) # Listen for changes on the new column column._add_listener(self) # If this ColDefs is being tracked by a Table, inform the # table that its data is now invalid. self._notify('column_added', self, column) return self def del_col(self, col_name): """ Delete (the definition of) one `Column`. col_name : str or int The column's name or index """ indx = _get_index(self.names, col_name) col = self.columns[indx] del self._arrays[indx] # Obliterate caches of certain things del self.dtype del self._recformats del self._dims del self.names del self.formats del self.columns[indx] col._remove_listener(self) # If this ColDefs is being tracked by a table HDU, inform the HDU (or # any other listeners) that the column has been removed # Just send a reference to self, and the index of the column that was # removed self._notify('column_removed', self, indx) return self def change_attrib(self, col_name, attrib, new_value): """ Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`. Parameters ---------- col_name : str or int The column name or index to change attrib : str The attribute name new_value : object The new value for the attribute """ setattr(self[col_name], attrib, new_value) def change_name(self, col_name, new_name): """ Change a `Column`'s name. Parameters ---------- col_name : str The current name of the column new_name : str The new name of the column """ if new_name != col_name and new_name in self.names: raise ValueError('New name {} already exists.'.format(new_name)) else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): """ Change a `Column`'s unit. Parameters ---------- col_name : str or int The column name or index new_unit : str The new unit for the column """ self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all', output=None): """ Get attribute(s) information of the column definition. Parameters ---------- attrib : str Can be one or more of the attributes listed in ``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is ``"all"`` which will print out all attributes. It forgives plurals and blanks. If there are two or more attribute names, they must be separated by comma(s). output : file, optional File-like object to output to. Outputs to stdout by default. If `False`, returns the attributes as a `dict` instead. Notes ----- This function doesn't return anything by default; it just prints to stdout. 
""" if output is None: output = sys.stdout if attrib.strip().lower() in ['all', '']: lst = KEYWORD_ATTRIBUTES else: lst = attrib.split(',') for idx in range(len(lst)): lst[idx] = lst[idx].strip().lower() if lst[idx][-1] == 's': lst[idx] = list[idx][:-1] ret = {} for attr in lst: if output: if attr not in KEYWORD_ATTRIBUTES: output.write("'{}' is not an attribute of the column " "definitions.\n".format(attr)) continue output.write("{}:\n".format(attr)) output.write(' {}\n'.format(getattr(self, attr + 's'))) else: ret[attr] = getattr(self, attr + 's') if not output: return ret class _AsciiColDefs(ColDefs): """ColDefs implementation for ASCII tables.""" _padding_byte = ' ' _col_format_cls = _AsciiColumnFormat def __init__(self, input, ascii=True): super().__init__(input) # if the format of an ASCII column has no width, add one if not isinstance(input, _AsciiColDefs): self._update_field_metrics() else: for idx, s in enumerate(input.starts): self.columns[idx].start = s self._spans = input.spans self._width = input._width @lazyproperty def dtype(self): dtype = {} for j in range(len(self)): data_type = 'S' + str(self.spans[j]) dtype[self.names[j]] = (data_type, self.starts[j] - 1) return np.dtype(dtype) @property def spans(self): """A list of the widths of each field in the table.""" return self._spans @lazyproperty def _recformats(self): if len(self) == 1: widths = [] else: widths = [y - x for x, y in pairwise(self.starts)] # Widths is the width of each field *including* any space between # fields; this is so that we can map the fields to string records in a # Numpy recarray widths.append(self._width - self.starts[-1] + 1) return ['a' + str(w) for w in widths] def add_col(self, column): super().add_col(column) self._update_field_metrics() def del_col(self, col_name): super().del_col(col_name) self._update_field_metrics() def _update_field_metrics(self): """ Updates the list of the start columns, the list of the widths of each field, and the total width of each record in the table. """ spans = [0] * len(self.columns) end_col = 0 # Refers to the ASCII text column, not the table col for idx, col in enumerate(self.columns): width = col.format.width # Update the start columns and column span widths taking into # account the case that the starting column of a field may not # be the column immediately after the previous field if not col.start: col.start = end_col + 1 end_col = col.start + width - 1 spans[idx] = width self._spans = spans self._width = end_col # Utilities class _VLF(np.ndarray): """Variable length field object.""" def __new__(cls, input, dtype='a'): """ Parameters ---------- input a sequence of variable-sized elements. """ if dtype == 'a': try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful! input = [chararray.array(x, itemsize=1) for x in input] except Exception: raise ValueError( 'Inconsistent input data array: {0}'.format(input)) a = np.array(input, dtype=object) self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object) self.max = 0 self.element_dtype = dtype return self def __array_finalize__(self, obj): if obj is None: return self.max = obj.max self.element_dtype = obj.element_dtype def __setitem__(self, key, value): """ To make sure the new item has consistent data type to avoid misalignment. 
""" if isinstance(value, np.ndarray) and value.dtype == self.dtype: pass elif isinstance(value, chararray.chararray) and value.itemsize == 1: pass elif self.element_dtype == 'a': value = chararray.array(value, itemsize=1) else: value = np.array(value, dtype=self.element_dtype) np.ndarray.__setitem__(self, key, value) self.max = max(self.max, len(value)) def _get_index(names, key): """ Get the index of the ``key`` in the ``names`` list. The ``key`` can be an integer or string. If integer, it is the index in the list. If string, a. Field (column) names are case sensitive: you can have two different columns called 'abc' and 'ABC' respectively. b. When you *refer* to a field (presumably with the field method), it will try to match the exact name first, so in the example in (a), field('abc') will get the first field, and field('ABC') will get the second field. If there is no exact name matched, it will try to match the name with case insensitivity. So, in the last example, field('Abc') will cause an exception since there is no unique mapping. If there is a field named "XYZ" and no other field name is a case variant of "XYZ", then field('xyz'), field('Xyz'), etc. will get this field. """ if _is_int(key): indx = int(key) elif isinstance(key, str): # try to find exact match first try: indx = names.index(key.rstrip()) except ValueError: # try to match case-insentively, _key = key.lower().rstrip() names = [n.lower().rstrip() for n in names] count = names.count(_key) # occurrence of _key in names if count == 1: indx = names.index(_key) elif count == 0: raise KeyError("Key '{}' does not exist.".format(key)) else: # multiple match raise KeyError("Ambiguous key name '{}'.".format(key)) else: raise KeyError("Illegal key '{!r}'.".format(key)) return indx def _unwrapx(input, output, repeat): """ Unwrap the X format column into a Boolean array. Parameters ---------- input input ``Uint8`` array of shape (`s`, `nbytes`) output output Boolean array of shape (`s`, `repeat`) repeat number of bits """ pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8') nbytes = ((repeat - 1) // 8) + 1 for i in range(nbytes): _min = i * 8 _max = min((i + 1) * 8, repeat) for j in range(_min, _max): output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8]) def _wrapx(input, output, repeat): """ Wrap the X format column Boolean array into an ``UInt8`` array. Parameters ---------- input input Boolean array of shape (`s`, `repeat`) output output ``Uint8`` array of shape (`s`, `nbytes`) repeat number of bits """ output[...] = 0 # reset the output nbytes = ((repeat - 1) // 8) + 1 unused = nbytes * 8 - repeat for i in range(nbytes): _min = i * 8 _max = min((i + 1) * 8, repeat) for j in range(_min, _max): if j != _min: np.left_shift(output[..., i], 1, output[..., i]) np.add(output[..., i], input[..., j], output[..., i]) # shift the unused bits np.left_shift(output[..., i], unused, output[..., i]) def _makep(array, descr_output, format, nrows=None): """ Construct the P (or Q) format column array, both the data descriptors and the data. It returns the output "data" array of data type `dtype`. The descriptor location will have a zero offset for all columns after this call. The final offset will be calculated when the file is written. 
Parameters ---------- array input object array descr_output output "descriptor" array of data type int32 (for P format arrays) or int64 (for Q format arrays)--must be nrows long in its first dimension format the _FormatP object representing the format of the variable array nrows : int, optional number of rows to create in the column; defaults to the number of rows in the input array """ # TODO: A great deal of this is redundant with FITS_rec._convert_p; see if # we can merge the two somehow. _offset = 0 if not nrows: nrows = len(array) data_output = _VLF([None] * nrows, dtype=format.dtype) if format.dtype == 'a': _nbytes = 1 else: _nbytes = np.array([], dtype=format.dtype).itemsize for idx in range(nrows): if idx < len(array): rowval = array[idx] else: if format.dtype == 'a': rowval = ' ' * data_output.max else: rowval = [0] * data_output.max if format.dtype == 'a': data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1) else: data_output[idx] = np.array(rowval, dtype=format.dtype) descr_output[idx, 0] = len(data_output[idx]) descr_output[idx, 1] = _offset _offset += len(data_output[idx]) * _nbytes return data_output def _parse_tformat(tform): """Parse ``TFORMn`` keyword for a binary table into a ``(repeat, format, option)`` tuple. """ try: (repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups() except Exception: # TODO: Maybe catch this error use a default type (bytes, maybe?) for # unrecognized column types. As long as we can determine the correct # byte width somehow.. raise VerifyError('Format {!r} is not recognized.'.format(tform)) if repeat == '': repeat = 1 else: repeat = int(repeat) return (repeat, format.upper(), option) def _parse_ascii_tformat(tform, strict=False): """ Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width, precision)`` tuple (the latter is always zero unless format is one of 'E', 'F', or 'D'). 
""" match = TFORMAT_ASCII_RE.match(tform.strip()) if not match: raise VerifyError('Format {!r} is not recognized.'.format(tform)) # Be flexible on case format = match.group('format') if format is None: # Floating point format format = match.group('formatf').upper() width = match.group('widthf') precision = match.group('precision') if width is None or precision is None: if strict: raise VerifyError('Format {!r} is not unambiguously an ASCII ' 'table format.') else: width = 0 if width is None else width precision = 1 if precision is None else precision else: format = format.upper() width = match.group('width') if width is None: if strict: raise VerifyError('Format {!r} is not unambiguously an ASCII ' 'table format.') else: # Just use a default width of 0 if unspecified width = 0 precision = 0 def convert_int(val): msg = ('Format {!r} is not valid--field width and decimal precision ' 'must be integers.') try: val = int(val) except (ValueError, TypeError): raise VerifyError(msg.format(tform)) return val if width and precision: # This should only be the case for floating-point formats width, precision = convert_int(width), convert_int(precision) elif width: # Just for integer/string formats; ignore precision width = convert_int(width) else: # For any format, if width was unspecified use the set defaults width, precision = ASCII_DEFAULT_WIDTHS[format] if width <= 0: raise VerifyError("Format {!r} not valid--field width must be a " "positive integeter.".format(tform)) if precision >= width: raise VerifyError("Format {!r} not valid--the number of decimal digits " "must be less than the format's total " "width {}.".format(tform, width)) return format, width, precision def _parse_tdim(tdim): """Parse the ``TDIM`` value into a tuple (may return an empty tuple if the value ``TDIM`` value is empty or invalid). """ m = tdim and TDIM_RE.match(tdim) if m: dims = m.group('dims') return tuple(int(d.strip()) for d in dims.split(','))[::-1] # Ignore any dim values that don't specify a multidimensional column return tuple() def _scalar_to_format(value): """ Given a scalar value or string, returns the minimum FITS column format that can represent that value. 'minimum' is defined by the order given in FORMATORDER. """ # First, if value is a string, try to convert to the appropriate scalar # value for type_ in (int, float, complex): try: value = type_(value) break except ValueError: continue numpy_dtype_str = np.min_scalar_type(value).str numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness try: fits_format = NUMPY2FITS[numpy_dtype_str] return FITSUPCONVERTERS.get(fits_format, fits_format) except KeyError: return "A" + str(len(value)) def _cmp_recformats(f1, f2): """ Compares two numpy recformats using the ordering given by FORMATORDER. """ if f1[0] == 'a' and f2[0] == 'a': return cmp(int(f1[1:]), int(f2[1:])) else: f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2] return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2)) def _convert_fits2record(format): """ Convert FITS format spec to record format spec. """ repeat, dtype, option = _parse_tformat(format) if dtype in FITS2NUMPY: if dtype == 'A': output_format = FITS2NUMPY[dtype] + str(repeat) # to accommodate both the ASCII table and binary table column # format spec, i.e. A7 in ASCII table is the same as 7A in # binary table, so both will produce 'a7'. 
# Technically the FITS standard does not allow this but it's a very # common mistake if format.lstrip()[0] == 'A' and option != '': # make sure option is integer output_format = FITS2NUMPY[dtype] + str(int(option)) else: repeat_str = '' if repeat != 1: repeat_str = str(repeat) output_format = repeat_str + FITS2NUMPY[dtype] elif dtype == 'X': output_format = _FormatX(repeat) elif dtype == 'P': output_format = _FormatP.from_tform(format) elif dtype == 'Q': output_format = _FormatQ.from_tform(format) elif dtype == 'F': output_format = 'f8' else: raise ValueError('Illegal format {}.'.format(format)) return output_format def _convert_record2fits(format): """ Convert record format spec to FITS format spec. """ recformat, kind, dtype = _dtype_to_recformat(format) shape = dtype.shape itemsize = dtype.base.itemsize if dtype.char == 'U': # Unicode dtype--itemsize is 4 times actual ASCII character length, # which what matters for FITS column formats # Use dtype.base--dtype may be a multi-dimensional dtype itemsize = itemsize // 4 option = str(itemsize) ndims = len(shape) repeat = 1 if ndims > 0: nel = np.array(shape, dtype='i8').prod() if nel > 1: repeat = nel if kind == 'a': # This is a kludge that will place string arrays into a # single field, so at least we won't lose data. Need to # use a TDIM keyword to fix this, declaring as (slength, # dim1, dim2, ...) as mwrfits does ntot = int(repeat) * int(option) output_format = str(ntot) + 'A' elif recformat in NUMPY2FITS: # record format if repeat != 1: repeat = str(repeat) else: repeat = '' output_format = repeat + NUMPY2FITS[recformat] else: raise ValueError('Illegal format {}.'.format(format)) return output_format def _dtype_to_recformat(dtype): """ Utility function for converting a dtype object or string that instantiates a dtype (e.g. 'float32') into one of the two character Numpy format codes that have been traditionally used by Astropy. In particular, use of 'a' to refer to character data is long since deprecated in Numpy, but Astropy remains heavily invested in its use (something to try to get away from sooner rather than later). """ if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) kind = dtype.base.kind if kind in ('U', 'S'): recformat = kind = 'a' else: itemsize = dtype.base.itemsize recformat = kind + str(itemsize) return recformat, kind, dtype def _convert_format(format, reverse=False): """ Convert FITS format spec to record format spec. Do the opposite if reverse=True. 
""" if reverse: return _convert_record2fits(format) else: return _convert_fits2record(format) def _convert_ascii_format(format, reverse=False): """Convert ASCII table format spec to record format spec.""" if reverse: recformat, kind, dtype = _dtype_to_recformat(format) itemsize = dtype.itemsize if kind == 'a': return 'A' + str(itemsize) elif NUMPY2FITS.get(recformat) == 'L': # Special case for logical/boolean types--for ASCII tables we # represent these as single character columns containing 'T' or 'F' # (a la the storage format for Logical columns in binary tables) return 'A1' elif kind == 'i': # Use for the width the maximum required to represent integers # of that byte size plus 1 for signs, but use a minimum of the # default width (to keep with existing behavior) width = 1 + len(str(2 ** (itemsize * 8))) width = max(width, ASCII_DEFAULT_WIDTHS['I'][0]) return 'I' + str(width) elif kind == 'f': # This is tricky, but go ahead and use D if float-64, and E # if float-32 with their default widths if itemsize >= 8: format = 'D' else: format = 'E' width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format]) return format + width # TODO: There may be reasonable ways to represent other Numpy types so # let's see what other possibilities there are besides just 'a', 'i', # and 'f'. If it doesn't have a reasonable ASCII representation then # raise an exception else: format, width, precision = _parse_ascii_tformat(format) # This gives a sensible "default" dtype for a given ASCII # format code recformat = ASCII2NUMPY[format] # The following logic is taken from CFITSIO: # For integers, if the width <= 4 we can safely use 16-bit ints for all # values [for the non-standard J format code just always force 64-bit] if format == 'I' and width <= 4: recformat = 'i2' elif format == 'A': recformat += str(width) return recformat
660fcf0c99c5fc68e2947eb08f80be105ed74fb782fd03377a578840d882a931
# Licensed under a 3-clause BSD style license - see PYFITS.rst import collections import copy import itertools import re import warnings from .card import Card, _pad, KEYWORD_LENGTH from .file import _File from .util import encode_ascii, decode_ascii, fileobj_closed, fileobj_is_binary from ...utils import isiterable from ...utils.exceptions import AstropyUserWarning from ...utils.decorators import deprecated_renamed_argument BLOCK_SIZE = 2880 # the FITS block size # This regular expression can match a *valid* END card which just consists of # the string 'END' followed by all spaces, or an *invalid* end card which # consists of END, followed by any character that is *not* a valid character # for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which # starts with 'END' but is not 'END'), followed by any arbitrary bytes. An # invalid end card may also consist of just 'END' with no trailing bytes. HEADER_END_RE = re.compile(encode_ascii( r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])')) # According to the FITS standard the only characters that may appear in a # header record are the restricted ASCII chars from 0x20 through 0x7E. VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F))) END_CARD = 'END' + ' ' * 77 __doctest_skip__ = ['Header', 'Header.*'] class Header: """ FITS header class. This class exposes both a dict-like interface and a list-like interface to FITS headers. The header may be indexed by keyword and, like a dict, the associated value will be returned. When the header contains cards with duplicate keywords, only the value of the first card with the given keyword will be returned. It is also possible to use a 2-tuple as the index in the form (keyword, n)--this returns the n-th value with that keyword, in the case where there are duplicate keywords. For example:: >>> header['NAXIS'] 0 >>> header[('FOO', 1)] # Return the value of the second FOO keyword 'foo' The header may also be indexed by card number:: >>> header[0] # Return the value of the first card in the header 'T' Commentary keywords such as HISTORY and COMMENT are special cases: When indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all the HISTORY/COMMENT values is returned:: >>> header['HISTORY'] This is the first history entry in this header. This is the second history entry in this header. ... See the Astropy documentation for more details on working with headers. """ def __init__(self, cards=[], copy=False): """ Construct a `Header` from an iterable and/or text file. Parameters ---------- cards : A list of `Card` objects, optional The cards to initialize the header with. Also allowed are other `Header` (or `dict`-like) objects. .. versionchanged:: 1.2 Allowed ``cards`` to be a `dict`-like object. copy : bool, optional If ``True`` copies the ``cards`` if they were another `Header` instance. Default is ``False``. .. versionadded:: 1.3 """ self.clear() if isinstance(cards, Header): if copy: cards = cards.copy() cards = cards.cards elif isinstance(cards, dict): cards = cards.items() for card in cards: self.append(card, end=True) self._modified = False def __len__(self): return len(self._cards) def __iter__(self): for card in self._cards: yield card.keyword def __contains__(self, keyword): if keyword in self._keyword_indices or keyword in self._rvkc_indices: # For the most common case (single, standard form keyword lookup) # this will work and is an O(1) check. 
If it fails that doesn't # guarantee absence, just that we have to perform the full set of # checks in self._cardindex return True try: self._cardindex(keyword) except (KeyError, IndexError): return False return True def __getitem__(self, key): if isinstance(key, slice): return Header([copy.copy(c) for c in self._cards[key]]) elif self._haswildcard(key): return Header([copy.copy(self._cards[idx]) for idx in self._wildcardmatch(key)]) elif (isinstance(key, str) and key.upper() in Card._commentary_keywords): key = key.upper() # Special case for commentary cards return _HeaderCommentaryCards(self, key) if isinstance(key, tuple): keyword = key[0] else: keyword = key card = self._cards[self._cardindex(key)] if card.field_specifier is not None and keyword == card.rawkeyword: # This is RVKC; if only the top-level keyword was specified return # the raw value, not the parsed out float value return card.rawvalue return card.value def __setitem__(self, key, value): if self._set_slice(key, value, self): return if isinstance(value, tuple): if not (0 < len(value) <= 2): raise ValueError( 'A Header item may be set with either a scalar value, ' 'a 1-tuple containing a scalar value, or a 2-tuple ' 'containing a scalar value and comment string.') if len(value) == 1: value, comment = value[0], None if value is None: value = '' elif len(value) == 2: value, comment = value if value is None: value = '' if comment is None: comment = '' else: comment = None card = None if isinstance(key, int): card = self._cards[key] elif isinstance(key, tuple): card = self._cards[self._cardindex(key)] if card: card.value = value if comment is not None: card.comment = comment if card._modified: self._modified = True else: # If we get an IndexError that should be raised; we don't allow # assignment to non-existing indices self._update((key, value, comment)) def __delitem__(self, key): if isinstance(key, slice) or self._haswildcard(key): # This is very inefficient but it's not a commonly used feature. # If someone out there complains that they make heavy use of slice # deletions and it's too slow, well, we can worry about it then # [the solution is not too complicated--it would be wait 'til all # the cards are deleted before updating _keyword_indices rather # than updating it once for each card that gets deleted] if isinstance(key, slice): indices = range(*key.indices(len(self))) # If the slice step is backwards we want to reverse it, because # it will be reversed in a few lines... if key.step and key.step < 0: indices = reversed(indices) else: indices = self._wildcardmatch(key) for idx in reversed(indices): del self[idx] return elif isinstance(key, str): # delete ALL cards with the same keyword name key = Card.normalize_keyword(key) indices = self._keyword_indices if key not in self._keyword_indices: indices = self._rvkc_indices if key not in indices: # if keyword is not present raise KeyError. 
# To delete keyword without caring if they were present, # Header.remove(Keyword) can be used with optional argument ignore_missing as True raise KeyError("Keyword '{}' not found.".format(key)) for idx in reversed(indices[key]): # Have to copy the indices list since it will be modified below del self[idx] return idx = self._cardindex(key) card = self._cards[idx] keyword = card.keyword del self._cards[idx] keyword = Card.normalize_keyword(keyword) indices = self._keyword_indices[keyword] indices.remove(idx) if not indices: del self._keyword_indices[keyword] # Also update RVKC indices if necessary :/ if card.field_specifier is not None: indices = self._rvkc_indices[card.rawkeyword] indices.remove(idx) if not indices: del self._rvkc_indices[card.rawkeyword] # We also need to update all other indices self._updateindices(idx, increment=False) self._modified = True def __repr__(self): return self.tostring(sep='\n', endcard=False, padding=False) def __str__(self): return self.tostring() def __eq__(self, other): """ Two Headers are equal only if they have the exact same string representation. """ return str(self) == str(other) def __add__(self, other): temp = self.copy(strip=False) temp.extend(other) return temp def __iadd__(self, other): self.extend(other) return self def _ipython_key_completions_(self): return self.__iter__() @property def cards(self): """ The underlying physical cards that make up this Header; it can be looked at, but it should not be modified directly. """ return _CardAccessor(self) @property def comments(self): """ View the comments associated with each keyword, if any. For example, to see the comment on the NAXIS keyword: >>> header.comments['NAXIS'] number of data axes Comments can also be updated through this interface: >>> header.comments['NAXIS'] = 'Number of data axes' """ return _HeaderComments(self) @property def _modified(self): """ Whether or not the header has been modified; this is a property so that it can also check each card for modifications--cards may have been modified directly without the header containing it otherwise knowing. """ modified_cards = any(c._modified for c in self._cards) if modified_cards: # If any cards were modified then by definition the header was # modified self.__dict__['_modified'] = True return self.__dict__['_modified'] @_modified.setter def _modified(self, val): self.__dict__['_modified'] = val @classmethod def fromstring(cls, data, sep=''): """ Creates an HDU header from a byte string containing the entire header data. Parameters ---------- data : str String containing the entire header. sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). Returns ------- header A new `Header` instance. """ cards = [] # If the card separator contains characters that may validly appear in # a card, the only way to unambiguously distinguish between cards is to # require that they be Card.length long. 
However, if the separator # contains non-valid characters (namely \n) the cards may be split # immediately at the separator require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS) # Split the header into individual cards idx = 0 image = [] while idx < len(data): if require_full_cardlength: end_idx = idx + Card.length else: try: end_idx = data.index(sep, idx) except ValueError: end_idx = len(data) next_image = data[idx:end_idx] idx = end_idx + len(sep) if image: if next_image[:8] == 'CONTINUE': image.append(next_image) continue cards.append(Card.fromstring(''.join(image))) if require_full_cardlength: if next_image == END_CARD: image = [] break else: if next_image.split(sep)[0].rstrip() == 'END': image = [] break image = [next_image] # Add the last image that was found before the end, if any if image: cards.append(Card.fromstring(''.join(image))) return cls(cards) @classmethod def fromfile(cls, fileobj, sep='', endcard=True, padding=True): """ Similar to :meth:`Header.fromstring`, but reads the header string from a given file-like object or filename. Parameters ---------- fileobj : str, file-like A filename or an open file-like object from which a FITS header is to be read. For open file handles the file pointer must be at the beginning of the header. sep : str, optional The string separating cards from each other, such as a newline. By default there is no card separator (as is the case in a raw FITS file). endcard : bool, optional If True (the default) the header must end with an END card in order to be considered valid. If an END card is not found an `OSError` is raised. padding : bool, optional If True (the default) the header will be required to be padded out to a multiple of 2880, the FITS header block size. Otherwise any padding, or lack thereof, is ignored. Returns ------- header A new `Header` instance. """ close_file = False if isinstance(fileobj, str): # Open in text mode by default to support newline handling; if a # binary-mode file object is passed in, the user is on their own # with respect to newline handling fileobj = open(fileobj, 'r') close_file = True try: is_binary = fileobj_is_binary(fileobj) def block_iter(nbytes): while True: data = fileobj.read(nbytes) if data: yield data else: break return cls._from_blocks(block_iter, is_binary, sep, endcard, padding)[1] finally: if close_file: fileobj.close() @classmethod def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding): """ The meat of `Header.fromfile`; in a separate method so that `Header.fromfile` itself is just responsible for wrapping file handling. Also used by `_BaseHDU.fromstring`. ``block_iter`` should be a callable which, given a block size n (typically 2880 bytes as used by the FITS standard) returns an iterator of byte strings of that block size. ``is_binary`` specifies whether the returned blocks are bytes or text Returns both the entire header *string*, and the `Header` object returned by Header.fromstring on that string. """ actual_block_size = _block_size(sep) clen = Card.length + len(sep) blocks = block_iter(actual_block_size) # Read the first header block. 
try: block = next(blocks) except StopIteration: raise EOFError() if not is_binary: # TODO: There needs to be error handling at *this* level for # non-ASCII characters; maybe at this stage decoding latin-1 might # be safer block = encode_ascii(block) read_blocks = [] is_eof = False end_found = False # continue reading header blocks until END card or EOF is reached while True: # find the END card end_found, block = cls._find_end_card(block, clen) read_blocks.append(decode_ascii(block)) if end_found: break try: block = next(blocks) except StopIteration: is_eof = True break if not block: is_eof = True break if not is_binary: block = encode_ascii(block) if not end_found and is_eof and endcard: # TODO: Pass this error to validation framework as an ERROR, # rather than raising an exception raise OSError('Header missing END card.') header_str = ''.join(read_blocks) # Strip any zero-padding (see ticket #106) if header_str and header_str[-1] == '\0': if is_eof and header_str.strip('\0') == '': # TODO: Pass this warning to validation framework warnings.warn( 'Unexpected extra padding at the end of the file. This ' 'padding may not be preserved when saving changes.', AstropyUserWarning) raise EOFError() else: # Replace the illegal null bytes with spaces as required by # the FITS standard, and issue a nasty warning # TODO: Pass this warning to validation framework warnings.warn( 'Header block contains null bytes instead of spaces for ' 'padding, and is not FITS-compliant. Nulls may be ' 'replaced with spaces upon writing.', AstropyUserWarning) header_str.replace('\0', ' ') if padding and (len(header_str) % actual_block_size) != 0: # This error message ignores the length of the separator for # now, but maybe it shouldn't? actual_len = len(header_str) - actual_block_size + BLOCK_SIZE # TODO: Pass this error to validation framework raise ValueError( 'Header size is not multiple of {0}: {1}'.format(BLOCK_SIZE, actual_len)) return header_str, cls.fromstring(header_str, sep=sep) @classmethod def _find_end_card(cls, block, card_len): """ Utility method to search a header block for the END card and handle invalid END cards. This method can also returned a modified copy of the input header block in case an invalid end card needs to be sanitized. """ for mo in HEADER_END_RE.finditer(block): # Ensure the END card was found, and it started on the # boundary of a new card (see ticket #142) if mo.start() % card_len != 0: continue # This must be the last header block, otherwise the # file is malformatted if mo.group('invalid'): offset = mo.start() trailing = block[offset + 3:offset + card_len - 3].rstrip() if trailing: trailing = repr(trailing).lstrip('ub') # TODO: Pass this warning up to the validation framework warnings.warn( 'Unexpected bytes trailing END keyword: {0}; these ' 'bytes will be replaced with spaces on write.'.format( trailing), AstropyUserWarning) else: # TODO: Pass this warning up to the validation framework warnings.warn( 'Missing padding to end of the FITS block after the ' 'END keyword; additional spaces will be appended to ' 'the file upon writing to pad out to {0} ' 'bytes.'.format(BLOCK_SIZE), AstropyUserWarning) # Sanitize out invalid END card now that the appropriate # warnings have been issued block = (block[:offset] + encode_ascii(END_CARD) + block[offset + len(END_CARD):]) return True, block return False, block def tostring(self, sep='', endcard=True, padding=True): r""" Returns a string representation of the header. 
By default this uses no separator between cards, adds the END card, and pads the string with spaces to the next multiple of 2880 bytes. That is, it returns the header exactly as it would appear in a FITS file. Parameters ---------- sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If True (default) adds the END card to the end of the header string padding : bool, optional If True (default) pads the string with spaces out to the next multiple of 2880 characters Returns ------- s : str A string representing a FITS header. """ lines = [] for card in self._cards: s = str(card) # Cards with CONTINUE cards may be longer than 80 chars; so break # them into multiple lines while s: lines.append(s[:Card.length]) s = s[Card.length:] s = sep.join(lines) if endcard: s += sep + _pad('END') if padding: s += ' ' * _pad_length(len(s)) return s @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def tofile(self, fileobj, sep='', endcard=True, padding=True, overwrite=False): r""" Writes the header to file or file-like object. By default this writes the header exactly as it would be written to a FITS file, with the END card included and padding to the next multiple of 2880 bytes. However, aspects of this may be controlled. Parameters ---------- fileobj : str, file, optional Either the pathname of a file, or an open file handle or file-like object sep : str, optional The character or string with which to separate cards. By default there is no separator, but one could use ``'\\n'``, for example, to separate each card with a new line endcard : bool, optional If `True` (default) adds the END card to the end of the header string padding : bool, optional If `True` (default) pads the string with spaces out to the next multiple of 2880 characters overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. """ close_file = fileobj_closed(fileobj) if not isinstance(fileobj, _File): fileobj = _File(fileobj, mode='ostream', overwrite=overwrite) try: blocks = self.tostring(sep=sep, endcard=endcard, padding=padding) actual_block_size = _block_size(sep) if padding and len(blocks) % actual_block_size != 0: raise OSError( 'Header size ({}) is not a multiple of block ' 'size ({}).'.format( len(blocks) - actual_block_size + BLOCK_SIZE, BLOCK_SIZE)) if not fileobj.simulateonly: fileobj.flush() try: offset = fileobj.tell() except (AttributeError, OSError): offset = 0 fileobj.write(blocks.encode('ascii')) fileobj.flush() finally: if close_file: fileobj.close() @classmethod def fromtextfile(cls, fileobj, endcard=False): """ Read a header from a simple text file or file-like object. Equivalent to:: >>> Header.fromfile(fileobj, sep='\\n', endcard=False, ... padding=False) See Also -------- fromfile """ return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def totextfile(self, fileobj, endcard=False, overwrite=False): """ Write the header as text to a file or a file-like object. Equivalent to:: >>> Header.tofile(fileobj, sep='\\n', endcard=False, ... padding=False, overwrite=overwrite) .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. 
See Also -------- tofile """ self.tofile(fileobj, sep='\n', endcard=endcard, padding=False, overwrite=overwrite) def clear(self): """ Remove all cards from the header. """ self._cards = [] self._keyword_indices = collections.defaultdict(list) self._rvkc_indices = collections.defaultdict(list) def copy(self, strip=False): """ Make a copy of the :class:`Header`. .. versionchanged:: 1.3 `copy.copy` and `copy.deepcopy` on a `Header` will call this method. Parameters ---------- strip : bool, optional If `True`, strip any headers that are specific to one of the standard HDU types, so that this header can be used in a different HDU. Returns ------- header A new :class:`Header` instance. """ tmp = Header((copy.copy(card) for card in self._cards)) if strip: tmp._strip() return tmp def __copy__(self): return self.copy() def __deepcopy__(self, *args, **kwargs): return self.copy() @classmethod def fromkeys(cls, iterable, value=None): """ Similar to :meth:`dict.fromkeys`--creates a new `Header` from an iterable of keywords and an optional default value. This method is not likely to be particularly useful for creating real world FITS headers, but it is useful for testing. Parameters ---------- iterable Any iterable that returns strings representing FITS keywords. value : optional A default value to assign to each keyword; must be a valid type for FITS keywords. Returns ------- header A new `Header` instance. """ d = cls() if not isinstance(value, tuple): value = (value,) for key in iterable: d.append((key,) + value) return d def get(self, key, default=None): """ Similar to :meth:`dict.get`--returns the value associated with keyword in the header, or a default value if the keyword is not found. Parameters ---------- key : str A keyword that may or may not be in the header. default : optional A default value to return if the keyword is not found in the header. Returns ------- value The value associated with the given keyword, or the default value if the keyword is not in the header. """ try: return self[key] except (KeyError, IndexError): return default def set(self, keyword, value=None, comment=None, before=None, after=None): """ Set the value and/or comment and/or position of a specified keyword. If the keyword does not already exist in the header, a new keyword is created in the specified position, or appended to the end of the header if no position is specified. This method is similar to :meth:`Header.update` prior to Astropy v0.1. .. note:: It should be noted that ``header.set(keyword, value)`` and ``header.set(keyword, value, comment)`` are equivalent to ``header[keyword] = value`` and ``header[keyword] = (value, comment)`` respectively. New keywords can also be inserted relative to existing keywords using, for example:: >>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes')) to insert before an existing keyword, or:: >>> header.insert('NAXIS', ('NAXIS1', 4096), after=True) to insert after an existing keyword. The only advantage of using :meth:`Header.set` is that it easily replaces the old usage of :meth:`Header.update` both conceptually and in terms of function signature. 
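        For example, to give a keyword a new value and comment, placing it
        immediately after an existing ``NAXIS`` card if it is not already
        present (the keywords here are purely illustrative)::

            >>> header.set('NAXIS1', 4096, 'length of data axis 1', after='NAXIS')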
Parameters ---------- keyword : str A header keyword value : str, optional The value to set for the given keyword; if None the existing value is kept, but '' may be used to set a blank value comment : str, optional The comment to set for the given keyword; if None the existing comment is kept, but ``''`` may be used to set a blank comment before : str, int, optional Name of the keyword, or index of the `Card` before which this card should be located in the header. The argument ``before`` takes precedence over ``after`` if both specified. after : str, int, optional Name of the keyword, or index of the `Card` after which this card should be located in the header. """ # Create a temporary card that looks like the one being set; if the # temporary card turns out to be a RVKC this will make it easier to # deal with the idiosyncrasies thereof # Don't try to make a temporary card though if they keyword looks like # it might be a HIERARCH card or is otherwise invalid--this step is # only for validating RVKCs. if (len(keyword) <= KEYWORD_LENGTH and Card._keywd_FSC_RE.match(keyword) and keyword not in self._keyword_indices): new_card = Card(keyword, value, comment) new_keyword = new_card.keyword else: new_keyword = keyword if (new_keyword not in Card._commentary_keywords and new_keyword in self): if comment is None: comment = self.comments[keyword] if value is None: value = self[keyword] self[keyword] = (value, comment) if before is not None or after is not None: card = self._cards[self._cardindex(keyword)] self._relativeinsert(card, before=before, after=after, replace=True) elif before is not None or after is not None: self._relativeinsert((keyword, value, comment), before=before, after=after) else: self[keyword] = (value, comment) def items(self): """Like :meth:`dict.items`.""" for card in self._cards: yield (card.keyword, card.value) def keys(self): """ Like :meth:`dict.keys`--iterating directly over the `Header` instance has the same behavior. """ return self.__iter__() def values(self): """Like :meth:`dict.values`.""" for _, v in self.items(): yield v def pop(self, *args): """ Works like :meth:`list.pop` if no arguments or an index argument are supplied; otherwise works like :meth:`dict.pop`. """ if len(args) > 2: raise TypeError('Header.pop expected at most 2 arguments, got ' '{}'.format(len(args))) if len(args) == 0: key = -1 else: key = args[0] try: value = self[key] except (KeyError, IndexError): if len(args) == 2: return args[1] raise del self[key] return value def popitem(self): """Similar to :meth:`dict.popitem`.""" try: k, v = next(self.items()) except StopIteration: raise KeyError('Header is empty') del self[k] return k, v def setdefault(self, key, default=None): """Similar to :meth:`dict.setdefault`.""" try: return self[key] except (KeyError, IndexError): self[key] = default return default def update(self, *args, **kwargs): """ Update the Header with new keyword values, updating the values of existing keywords and appending new keywords otherwise; similar to `dict.update`. `update` accepts either a dict-like object or an iterable. In the former case the keys must be header keywords and the values may be either scalar values or (value, comment) tuples. In the case of an iterable the items must be (keyword, value) tuples or (keyword, value, comment) tuples. Arbitrary arguments are also accepted, in which case the update() is called again with the kwargs dict as its only argument. 
That is, :: >>> header.update(NAXIS1=100, NAXIS2=100) is equivalent to:: header.update({'NAXIS1': 100, 'NAXIS2': 100}) .. warning:: As this method works similarly to `dict.update` it is very different from the ``Header.update()`` method in Astropy v0.1. Use of the old API was **deprecated** for a long time and is now removed. Most uses of the old API can be replaced as follows: * Replace :: header.update(keyword, value) with :: header[keyword] = value * Replace :: header.update(keyword, value, comment=comment) with :: header[keyword] = (value, comment) * Replace :: header.update(keyword, value, before=before_keyword) with :: header.insert(before_keyword, (keyword, value)) * Replace :: header.update(keyword, value, after=after_keyword) with :: header.insert(after_keyword, (keyword, value), after=True) See also :meth:`Header.set` which is a new method that provides an interface similar to the old ``Header.update()`` and may help make transition a little easier. """ if args: other = args[0] else: other = None def update_from_dict(k, v): if not isinstance(v, tuple): card = Card(k, v) elif 0 < len(v) <= 2: card = Card(*((k,) + v)) else: raise ValueError( 'Header update value for key %r is invalid; the ' 'value must be either a scalar, a 1-tuple ' 'containing the scalar value, or a 2-tuple ' 'containing the value and a comment string.' % k) self._update(card) if other is None: pass elif hasattr(other, 'items'): for k, v in other.items(): update_from_dict(k, v) elif hasattr(other, 'keys'): for k in other.keys(): update_from_dict(k, other[k]) else: for idx, card in enumerate(other): if isinstance(card, Card): self._update(card) elif isinstance(card, tuple) and (1 < len(card) <= 3): self._update(Card(*card)) else: raise ValueError( 'Header update sequence item #{} is invalid; ' 'the item must either be a 2-tuple containing ' 'a keyword and value, or a 3-tuple containing ' 'a keyword, value, and comment string.'.format(idx)) if kwargs: self.update(kwargs) def append(self, card=None, useblanks=True, bottom=False, end=False): """ Appends a new keyword+value card to the end of the Header, similar to `list.append`. By default if the last cards in the Header have commentary keywords, this will append the new keyword before the commentary (unless the new keyword is also commentary). Also differs from `list.append` in that it can be called with no arguments: In this case a blank card is appended to the end of the Header. In the case all the keyword arguments are ignored. Parameters ---------- card : str, tuple A keyword or a (keyword, value, [comment]) tuple representing a single header card; the comment is optional in which case a 2-tuple may be used useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. bottom : bool, optional If True, instead of appending after the last non-commentary card, append after the last non-blank card. end : bool, optional If True, ignore the useblanks and bottom options, and append at the very end of the Header. 
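        For example, a new card can be appended directly from a tuple (the
        ``OBSERVER`` keyword here is purely illustrative)::

            >>> header.append(('OBSERVER', 'Edwin Hubble', 'observer name'))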
""" if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif card is None: card = Card() elif not isinstance(card, Card): raise ValueError( 'The value appended to a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) if not end and card.is_blank: # Blank cards should always just be appended to the end end = True if end: self._cards.append(card) idx = len(self._cards) - 1 else: idx = len(self._cards) - 1 while idx >= 0 and self._cards[idx].is_blank: idx -= 1 if not bottom and card.keyword not in Card._commentary_keywords: while (idx >= 0 and self._cards[idx].keyword in Card._commentary_keywords): idx -= 1 idx += 1 self._cards.insert(idx, card) self._updateindices(idx) keyword = Card.normalize_keyword(card.keyword) self._keyword_indices[keyword].append(idx) if card.field_specifier is not None: self._rvkc_indices[card.rawkeyword].append(idx) if not end: # If the appended card was a commentary card, and it was appended # before existing cards with the same keyword, the indices for # cards with that keyword may have changed if not bottom and card.keyword in Card._commentary_keywords: self._keyword_indices[keyword].sort() # Finally, if useblanks, delete a blank cards from the end if useblanks and self._countblanks(): # Don't do this unless there is at least one blanks at the end # of the header; we need to convert the card to its string # image to see how long it is. In the vast majority of cases # this will just be 80 (Card.length) but it may be longer for # CONTINUE cards self._useblanks(len(str(card)) // Card.length) self._modified = True def extend(self, cards, strip=True, unique=False, update=False, update_first=False, useblanks=True, bottom=False, end=False): """ Appends multiple keyword+value cards to the end of the header, similar to `list.extend`. Parameters ---------- cards : iterable An iterable of (keyword, value, [comment]) tuples; see `Header.append`. strip : bool, optional Remove any keywords that have meaning only to specific types of HDUs, so that only more general keywords are added from extension Header or Card list (default: `True`). unique : bool, optional If `True`, ensures that no duplicate keywords are appended; keywords already in this header are simply discarded. The exception is commentary keywords (COMMENT, HISTORY, etc.): they are only treated as duplicates if their values match. update : bool, optional If `True`, update the current header with the values and comments from duplicate keywords in the input header. This supercedes the ``unique`` argument. Commentary keywords are treated the same as if ``unique=True``. update_first : bool, optional If the first keyword in the header is 'SIMPLE', and the first keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is replaced by the 'XTENSION' keyword. Likewise if the first keyword in the header is 'XTENSION' and the first keyword in the input header is 'SIMPLE', the 'XTENSION' keyword is replaced by the 'SIMPLE' keyword. This behavior is otherwise dumb as to whether or not the resulting header is a valid primary or extension header. This is mostly provided to support backwards compatibility with the old ``Header.fromTxtFile`` method, and only applies if ``update=True``. useblanks, bottom, end : bool, optional These arguments are passed to :meth:`Header.append` while appending new cards to the header. 
""" temp = Header(cards) if strip: temp._strip() if len(self): first = self.cards[0].keyword else: first = None # We don't immediately modify the header, because first we need to sift # out any duplicates in the new header prior to adding them to the # existing header, but while *allowing* duplicates from the header # being extended from (see ticket #156) extend_cards = [] for idx, card in enumerate(temp.cards): keyword = card.keyword if keyword not in Card._commentary_keywords: if unique and not update and keyword in self: continue elif update: if idx == 0 and update_first: # Dumbly update the first keyword to either SIMPLE or # XTENSION as the case may be, as was in the case in # Header.fromTxtFile if ((keyword == 'SIMPLE' and first == 'XTENSION') or (keyword == 'XTENSION' and first == 'SIMPLE')): del self[0] self.insert(0, card) else: self[keyword] = (card.value, card.comment) elif keyword in self: self[keyword] = (card.value, card.comment) else: extend_cards.append(card) else: extend_cards.append(card) else: if (unique or update) and keyword in self: if card.is_blank: extend_cards.append(card) continue for value in self[keyword]: if value == card.value: break else: extend_cards.append(card) else: extend_cards.append(card) for card in extend_cards: self.append(card, useblanks=useblanks, bottom=bottom, end=end) def count(self, keyword): """ Returns the count of the given keyword in the header, similar to `list.count` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword to count instances of in the header """ keyword = Card.normalize_keyword(keyword) # We have to look before we leap, since otherwise _keyword_indices, # being a defaultdict, will create an entry for the nonexistent keyword if keyword not in self._keyword_indices: raise KeyError("Keyword {!r} not found.".format(keyword)) return len(self._keyword_indices[keyword]) def index(self, keyword, start=None, stop=None): """ Returns the index if the first instance of the given keyword in the header, similar to `list.index` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword to look up in the list of all keywords in the header start : int, optional The lower bound for the index stop : int, optional The upper bound for the index """ if start is None: start = 0 if stop is None: stop = len(self._cards) if stop < start: step = -1 else: step = 1 norm_keyword = Card.normalize_keyword(keyword) for idx in range(start, stop, step): if self._cards[idx].keyword.upper() == norm_keyword: return idx else: raise ValueError('The keyword {!r} is not in the ' ' header.'.format(keyword)) def insert(self, key, card, useblanks=True, after=False): """ Inserts a new keyword+value card into the Header at a given location, similar to `list.insert`. Parameters ---------- key : int, str, or tuple The index into the list of header keywords before which the new keyword should be inserted, or the name of a keyword before which the new keyword should be inserted. Can also accept a (keyword, index) tuple for inserting around duplicate keywords. card : str, tuple A keyword or a (keyword, value, [comment]) tuple; see `Header.append` useblanks : bool, optional If there are blank cards at the end of the Header, replace the first blank card so that the total number of cards in the Header does not increase. Otherwise preserve the number of blank cards. after : bool, optional If set to `True`, insert *after* the specified index or keyword, rather than before it. 
Defaults to `False`. """ if not isinstance(key, int): # Don't pass through ints to _cardindex because it will not take # kindly to indices outside the existing number of cards in the # header, which insert needs to be able to support (for example # when inserting into empty headers) idx = self._cardindex(key) else: idx = key if after: if idx == -1: idx = len(self._cards) else: idx += 1 if idx >= len(self._cards): # This is just an append (Though it must be an append absolutely to # the bottom, ignoring blanks, etc.--the point of the insert method # is that you get exactly what you asked for with no surprises) self.append(card, end=True) return if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif not isinstance(card, Card): raise ValueError( 'The value inserted into a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) self._cards.insert(idx, card) keyword = card.keyword # If idx was < 0, determine the actual index according to the rules # used by list.insert() if idx < 0: idx += len(self._cards) - 1 if idx < 0: idx = 0 # All the keyword indices above the insertion point must be updated self._updateindices(idx) keyword = Card.normalize_keyword(keyword) self._keyword_indices[keyword].append(idx) count = len(self._keyword_indices[keyword]) if count > 1: # There were already keywords with this same name if keyword not in Card._commentary_keywords: warnings.warn( 'A {!r} keyword already exists in this header. Inserting ' 'duplicate keyword.'.format(keyword), AstropyUserWarning) self._keyword_indices[keyword].sort() if card.field_specifier is not None: # Update the index of RVKC as well rvkc_indices = self._rvkc_indices[card.rawkeyword] rvkc_indices.append(idx) rvkc_indices.sort() if useblanks: self._useblanks(len(str(card)) // Card.length) self._modified = True def remove(self, keyword, ignore_missing=False, remove_all=False): """ Removes the first instance of the given keyword from the header similar to `list.remove` if the Header object is treated as a list of keywords. Parameters ---------- keyword : str The keyword of which to remove the first instance in the header. ignore_missing : bool, optional When True, ignores missing keywords. Otherwise, if the keyword is not present in the header a KeyError is raised. remove_all : bool, optional When True, all instances of keyword will be removed. Otherwise only the first instance of the given keyword is removed. """ keyword = Card.normalize_keyword(keyword) if keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] if remove_all: while keyword in self._keyword_indices: del self[self._keyword_indices[keyword][0]] elif not ignore_missing: raise KeyError("Keyword '{}' not found.".format(keyword)) def rename_keyword(self, oldkeyword, newkeyword, force=False): """ Rename a card's keyword in the header. Parameters ---------- oldkeyword : str or int Old keyword or card index newkeyword : str New keyword force : bool, optional When `True`, if the new keyword already exists in the header, force the creation of a duplicate keyword. Otherwise a `ValueError` is raised. 
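        For example, assuming the header already contains an ``OBJECT``
        card::

            >>> header.rename_keyword('OBJECT', 'TARGET')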
""" oldkeyword = Card.normalize_keyword(oldkeyword) newkeyword = Card.normalize_keyword(newkeyword) if newkeyword == 'CONTINUE': raise ValueError('Can not rename to CONTINUE') if (newkeyword in Card._commentary_keywords or oldkeyword in Card._commentary_keywords): if not (newkeyword in Card._commentary_keywords and oldkeyword in Card._commentary_keywords): raise ValueError('Regular and commentary keys can not be ' 'renamed to each other.') elif not force and newkeyword in self: raise ValueError('Intended keyword {} already exists in header.' .format(newkeyword)) idx = self.index(oldkeyword) card = self.cards[idx] del self[idx] self.insert(idx, (newkeyword, card.value, card.comment)) def add_history(self, value, before=None, after=None): """ Add a ``HISTORY`` card. Parameters ---------- value : str History text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('HISTORY', value, before=before, after=after) def add_comment(self, value, before=None, after=None): """ Add a ``COMMENT`` card. Parameters ---------- value : str Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('COMMENT', value, before=before, after=after) def add_blank(self, value='', before=None, after=None): """ Add a blank card. Parameters ---------- value : str, optional Text to be added. before : str or int, optional Same as in `Header.update` after : str or int, optional Same as in `Header.update` """ self._add_commentary('', value, before=before, after=after) def _update(self, card): """ The real update code. If keyword already exists, its value and/or comment will be updated. Otherwise a new card will be appended. This will not create a duplicate keyword except in the case of commentary cards. The only other way to force creation of a duplicate is to use the insert(), append(), or extend() methods. """ keyword, value, comment = card # Lookups for existing/known keywords are case-insensitive keyword = keyword.upper() if keyword.startswith('HIERARCH '): keyword = keyword[9:] if (keyword not in Card._commentary_keywords and keyword in self._keyword_indices): # Easy; just update the value/comment idx = self._keyword_indices[keyword][0] existing_card = self._cards[idx] existing_card.value = value if comment is not None: # '' should be used to explicitly blank a comment existing_card.comment = comment if existing_card._modified: self._modified = True elif keyword in Card._commentary_keywords: cards = self._splitcommentary(keyword, value) if keyword in self._keyword_indices: # Append after the last keyword of the same type idx = self.index(keyword, start=len(self) - 1, stop=-1) isblank = not (keyword or value or comment) for c in reversed(cards): self.insert(idx + 1, c, useblanks=(not isblank)) else: for c in cards: self.append(c, bottom=True) else: # A new keyword! 
self.append() will handle updating _modified self.append(card) def _cardindex(self, key): """Returns an index into the ._cards list given a valid lookup key.""" # This used to just set key = (key, 0) and then go on to act as if the # user passed in a tuple, but it's much more common to just be given a # string as the key, so optimize more for that case if isinstance(key, str): keyword = key n = 0 elif isinstance(key, int): # If < 0, determine the actual index if key < 0: key += len(self._cards) if key < 0 or key >= len(self._cards): raise IndexError('Header index out of range.') return key elif isinstance(key, slice): return key elif isinstance(key, tuple): if (len(key) != 2 or not isinstance(key[0], str) or not isinstance(key[1], int)): raise ValueError( 'Tuple indices must be 2-tuples consisting of a ' 'keyword string and an integer index.') keyword, n = key else: raise ValueError( 'Header indices must be either a string, a 2-tuple, or ' 'an integer.') keyword = Card.normalize_keyword(keyword) # Returns the index into _cards for the n-th card with the given # keyword (where n is 0-based) indices = self._keyword_indices.get(keyword, None) if keyword and not indices: if len(keyword) > KEYWORD_LENGTH or '.' in keyword: raise KeyError("Keyword {!r} not found.".format(keyword)) else: # Maybe it's a RVKC? indices = self._rvkc_indices.get(keyword, None) if not indices: raise KeyError("Keyword {!r} not found.".format(keyword)) try: return indices[n] except IndexError: raise IndexError('There are only {} {!r} cards in the ' 'header.'.format(len(indices), keyword)) def _keyword_from_index(self, idx): """ Given an integer index, return the (keyword, repeat) tuple that index refers to. For most keywords the repeat will always be zero, but it may be greater than zero for keywords that are duplicated (especially commentary keywords). In a sense this is the inverse of self.index, except that it also supports duplicates. """ if idx < 0: idx += len(self._cards) keyword = self._cards[idx].keyword keyword = Card.normalize_keyword(keyword) repeat = self._keyword_indices[keyword].index(idx) return keyword, repeat def _relativeinsert(self, card, before=None, after=None, replace=False): """ Inserts a new card before or after an existing card; used to implement support for the legacy before/after keyword arguments to Header.update(). If replace=True, move an existing card with the same keyword. """ if before is None: insertionkey = after else: insertionkey = before def get_insertion_idx(): if not (isinstance(insertionkey, int) and insertionkey >= len(self._cards)): idx = self._cardindex(insertionkey) else: idx = insertionkey if before is None: idx += 1 return idx if replace: # The card presumably already exists somewhere in the header. 
# Check whether or not we actually have to move it; if it does need # to be moved we just delete it and then it will be reinserted # below old_idx = self._cardindex(card.keyword) insertion_idx = get_insertion_idx() if (insertion_idx >= len(self._cards) and old_idx == len(self._cards) - 1): # The card would be appended to the end, but it's already at # the end return if before is not None: if old_idx == insertion_idx - 1: return elif after is not None and old_idx == insertion_idx: return del self[old_idx] # Even if replace=True, the insertion idx may have changed since the # old card was deleted idx = get_insertion_idx() if card[0] in Card._commentary_keywords: cards = reversed(self._splitcommentary(card[0], card[1])) else: cards = [card] for c in cards: self.insert(idx, c) def _updateindices(self, idx, increment=True): """ For all cards with index above idx, increment or decrement its index value in the keyword_indices dict. """ if idx > len(self._cards): # Save us some effort return increment = 1 if increment else -1 for index_sets in (self._keyword_indices, self._rvkc_indices): for indices in index_sets.values(): for jdx, keyword_index in enumerate(indices): if keyword_index >= idx: indices[jdx] += increment def _countblanks(self): """Returns the number of blank cards at the end of the Header.""" for idx in range(1, len(self._cards)): if not self._cards[-idx].is_blank: return idx - 1 return 0 def _useblanks(self, count): for _ in range(count): if self._cards[-1].is_blank: del self[-1] else: break def _haswildcard(self, keyword): """Return `True` if the input keyword contains a wildcard pattern.""" return (isinstance(keyword, str) and (keyword.endswith('...') or '*' in keyword or '?' in keyword)) def _wildcardmatch(self, pattern): """ Returns a list of indices of the cards matching the given wildcard pattern. * '*' matches 0 or more characters * '?' matches a single character * '...' matches 0 or more of any non-whitespace character """ pattern = pattern.replace('*', r'.*').replace('?', r'.') pattern = pattern.replace('...', r'\S*') + '$' pattern_re = re.compile(pattern, re.I) return [idx for idx, card in enumerate(self._cards) if pattern_re.match(card.keyword)] def _set_slice(self, key, value, target): """ Used to implement Header.__setitem__ and CardAccessor.__setitem__. """ if isinstance(key, slice) or self._haswildcard(key): if isinstance(key, slice): indices = range(*key.indices(len(target))) else: indices = self._wildcardmatch(key) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): target[idx] = val return True return False def _splitcommentary(self, keyword, value): """ Given a commentary keyword and value, returns a list of the one or more cards needed to represent the full value. This is primarily used to create the multiple commentary cards needed to represent a long value that won't fit into a single commentary card. 
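        For example, a 100 character ``HISTORY`` value is split into one full
        72 character card (``Card.length`` minus the keyword length) followed
        by a 28 character card::

            >>> cards = header._splitcommentary('HISTORY', 'x' * 100)
            >>> len(cards)
            2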
""" # The maximum value in each card can be the maximum card length minus # the maximum key length (which can include spaces if they key length # less than 8 maxlen = Card.length - KEYWORD_LENGTH valuestr = str(value) if len(valuestr) <= maxlen: # The value can fit in a single card cards = [Card(keyword, value)] else: # The value must be split across multiple consecutive commentary # cards idx = 0 cards = [] while idx < len(valuestr): cards.append(Card(keyword, valuestr[idx:idx + maxlen])) idx += maxlen return cards def _strip(self): """ Strip cards specific to a certain kind of header. Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of the header can be used to reconstruct another kind of header. """ # TODO: Previously this only deleted some cards specific to an HDU if # _hdutype matched that type. But it seemed simple enough to just # delete all desired cards anyways, and just ignore the KeyErrors if # they don't exist. # However, it might be desirable to make this extendable somehow--have # a way for HDU classes to specify some headers that are specific only # to that type, and should be removed otherwise. if 'NAXIS' in self: naxis = self['NAXIS'] else: naxis = 0 if 'TFIELDS' in self: tfields = self['TFIELDS'] else: tfields = 0 for idx in range(naxis): try: del self['NAXIS' + str(idx + 1)] except KeyError: pass for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'): for idx in range(tfields): try: del self[name + str(idx + 1)] except KeyError: pass for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND', 'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO', 'TFIELDS'): try: del self[name] except KeyError: pass def _add_commentary(self, key, value, before=None, after=None): """ Add a commentary card. If ``before`` and ``after`` are `None`, add to the last occurrence of cards of the same name (except blank card). If there is no card (or blank card), append at the end. """ if before is not None or after is not None: self._relativeinsert((key, value), before=before, after=after) else: self[key] = value collections.MutableSequence.register(Header) collections.MutableMapping.register(Header) class _CardAccessor: """ This is a generic class for wrapping a Header in such a way that you can use the header's slice/filtering capabilities to return a subset of cards and do something with them. This is sort of the opposite notion of the old CardList class--whereas Header used to use CardList to get lists of cards, this uses Header to get lists of cards. 
""" # TODO: Consider giving this dict/list methods like Header itself def __init__(self, header): self._header = header def __repr__(self): return '\n'.join(repr(c) for c in self._header._cards) def __len__(self): return len(self._header._cards) def __iter__(self): return iter(self._header._cards) def __eq__(self, other): # If the `other` item is a scalar we will still treat it as equal if # this _CardAccessor only contains one item if not isiterable(other) or isinstance(other, str): if len(self) == 1: other = [other] else: return False for a, b in itertools.zip_longest(self, other): if a != b: return False else: return True def __ne__(self, other): return not (self == other) def __getitem__(self, item): if isinstance(item, slice) or self._header._haswildcard(item): return self.__class__(self._header[item]) idx = self._header._cardindex(item) return self._header._cards[idx] def _setslice(self, item, value): """ Helper for implementing __setitem__ on _CardAccessor subclasses; slices should always be handled in this same way. """ if isinstance(item, slice) or self._header._haswildcard(item): if isinstance(item, slice): indices = range(*item.indices(len(self))) else: indices = self._header._wildcardmatch(item) if isinstance(value, str) or not isiterable(value): value = itertools.repeat(value, len(indices)) for idx, val in zip(indices, value): self[idx] = val return True return False collections.Mapping.register(_CardAccessor) collections.Sequence.register(_CardAccessor) class _HeaderComments(_CardAccessor): """ A class used internally by the Header class for the Header.comments attribute access. This object can be used to display all the keyword comments in the Header, or look up the comments on specific keywords. It allows all the same forms of keyword lookup as the Header class itself, but returns comments instead of values. """ def __iter__(self): for card in self._header._cards: yield card.comment def __repr__(self): """Returns a simple list of all keywords and their comments.""" keyword_length = KEYWORD_LENGTH for card in self._header._cards: keyword_length = max(keyword_length, len(card.keyword)) return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment, len=keyword_length) for c in self._header._cards) def __getitem__(self, item): """ Slices and filter strings return a new _HeaderComments containing the returned cards. Otherwise the comment of a single card is returned. """ item = super().__getitem__(item) if isinstance(item, _HeaderComments): # The item key was a slice return item return item.comment def __setitem__(self, item, comment): """ Set/update the comment on specified card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, comment, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards idx = self._header._cardindex(item) value = self._header[idx] self._header[idx] = (value, comment) class _HeaderCommentaryCards(_CardAccessor): """ This is used to return a list-like sequence over all the values in the header for a given commentary keyword, such as HISTORY. 
""" def __init__(self, header, keyword=''): super().__init__(header) self._keyword = keyword self._count = self._header.count(self._keyword) self._indices = slice(self._count).indices(self._count) # __len__ and __iter__ need to be overridden from the base class due to the # different approach this class has to take for slicing def __len__(self): return len(range(*self._indices)) def __iter__(self): for idx in range(*self._indices): yield self._header[(self._keyword, idx)] def __repr__(self): return '\n'.join(self) def __getitem__(self, idx): if isinstance(idx, slice): n = self.__class__(self._header, self._keyword) n._indices = idx.indices(self._count) return n elif not isinstance(idx, int): raise ValueError('{} index must be an integer'.format(self._keyword)) idx = list(range(*self._indices))[idx] return self._header[(self._keyword, idx)] def __setitem__(self, item, value): """ Set the value of a specified commentary card or cards. Slice/filter updates work similarly to how Header.__setitem__ works. """ if self._header._set_slice(item, value, self): return # In this case, key/index errors should be raised; don't update # comments of nonexistent cards self._header[(self._keyword, item)] = value def _block_size(sep): """ Determine the size of a FITS header block if a non-blank separator is used between cards. """ return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1)) def _pad_length(stringlen): """Bytes needed to pad the input stringlen to the next FITS block.""" return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
# Licensed under a 3-clause BSD style license - see PYFITS.rst import copy import operator import warnings import weakref from contextlib import suppress from functools import reduce import numpy as np from numpy import char as chararray from .column import (ASCIITNULL, FITS2NUMPY, ASCII2NUMPY, ASCII2STR, ColDefs, _AsciiColDefs, _FormatX, _FormatP, _VLF, _get_index, _wrapx, _unwrapx, _makep, Delayed) from .util import decode_ascii, encode_ascii, _rstrip_inplace from ...utils import lazyproperty class FITS_record: """ FITS record class. `FITS_record` is used to access records of the `FITS_rec` object. This will allow us to deal with scaled columns. It also handles conversion/scaling of columns in ASCII tables. The `FITS_record` class expects a `FITS_rec` object as input. """ def __init__(self, input, row=0, start=None, end=None, step=None, base=None, **kwargs): """ Parameters ---------- input : array The array to wrap. row : int, optional The starting logical row of the array. start : int, optional The starting column in the row associated with this object. Used for subsetting the columns of the `FITS_rec` object. end : int, optional The ending column in the row associated with this object. Used for subsetting the columns of the `FITS_rec` object. """ self.array = input self.row = row if base: width = len(base) else: width = self.array._nfields s = slice(start, end, step).indices(width) self.start, self.end, self.step = s self.base = base def __getitem__(self, key): if isinstance(key, str): indx = _get_index(self.array.names, key) if indx < self.start or indx > self.end - 1: raise KeyError("Key '{}' does not exist.".format(key)) elif isinstance(key, slice): return type(self)(self.array, self.row, key.start, key.stop, key.step, self) else: indx = self._get_index(key) if indx > self.array._nfields - 1: raise IndexError('Index out of bounds') return self.array.field(indx)[self.row] def __setitem__(self, key, value): if isinstance(key, str): indx = _get_index(self.array.names, key) if indx < self.start or indx > self.end - 1: raise KeyError("Key '{}' does not exist.".format(key)) elif isinstance(key, slice): for indx in range(slice.start, slice.stop, slice.step): indx = self._get_indx(indx) self.array.field(indx)[self.row] = value else: indx = self._get_index(key) if indx > self.array._nfields - 1: raise IndexError('Index out of bounds') self.array.field(indx)[self.row] = value def __len__(self): return len(range(self.start, self.end, self.step)) def __repr__(self): """ Display a single row. """ outlist = [] for idx in range(len(self)): outlist.append(repr(self[idx])) return '({})'.format(', '.join(outlist)) def field(self, field): """ Get the field data of the record. """ return self.__getitem__(field) def setfield(self, field, value): """ Set the field data of the record. """ self.__setitem__(field, value) @lazyproperty def _bases(self): bases = [weakref.proxy(self)] base = self.base while base: bases.append(base) base = base.base return bases def _get_index(self, indx): indices = np.ogrid[:self.array._nfields] for base in reversed(self._bases): if base.step < 1: s = slice(base.start, None, base.step) else: s = slice(base.start, base.end, base.step) indices = indices[s] return indices[indx] class FITS_rec(np.recarray): """ FITS record array class. `FITS_rec` is the data part of a table HDU's data part. This is a layer over the `~numpy.recarray`, so we can deal with scaled columns. It inherits all of the standard methods from `numpy.ndarray`. 
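    Columns can be accessed either by name or by index through the ``field``
    method, for example ``data['time']`` or ``data.field(0)`` (the column
    name here is purely illustrative).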
""" _record_type = FITS_record _character_as_bytes = False def __new__(subtype, input): """ Construct a FITS record array from a recarray. """ # input should be a record array if input.dtype.subdtype is None: self = np.recarray.__new__(subtype, input.shape, input.dtype, buf=input.data) else: self = np.recarray.__new__(subtype, input.shape, input.dtype, buf=input.data, strides=input.strides) self._init() if self.dtype.fields: self._nfields = len(self.dtype.fields) return self def __setstate__(self, state): meta = state[-1] column_state = state[-2] state = state[:-2] super().__setstate__(state) self._col_weakrefs = weakref.WeakSet() for attr, value in zip(meta, column_state): setattr(self, attr, value) def __reduce__(self): """ Return a 3-tuple for pickling a FITS_rec. Use the super-class functionality but then add in a tuple of FITS_rec-specific values that get used in __setstate__. """ reconst_func, reconst_func_args, state = super().__reduce__() # Define FITS_rec-specific attrs that get added to state column_state = [] meta = [] for attrs in ['_converted', '_heapoffset', '_heapsize', '_nfields', '_gap', '_uint', 'parnames', '_coldefs']: with suppress(AttributeError): # _coldefs can be Delayed, and file objects cannot be # picked, it needs to be deepcopied first if attrs == '_coldefs': column_state.append(self._coldefs.__deepcopy__(None)) else: column_state.append(getattr(self, attrs)) meta.append(attrs) state = state + (column_state, meta) return reconst_func, reconst_func_args, state def __array_finalize__(self, obj): if obj is None: return if isinstance(obj, FITS_rec): self._character_as_bytes = obj._character_as_bytes if isinstance(obj, FITS_rec) and obj.dtype == self.dtype: self._converted = obj._converted self._heapoffset = obj._heapoffset self._heapsize = obj._heapsize self._col_weakrefs = obj._col_weakrefs self._coldefs = obj._coldefs self._nfields = obj._nfields self._gap = obj._gap self._uint = obj._uint elif self.dtype.fields is not None: # This will allow regular ndarrays with fields, rather than # just other FITS_rec objects self._nfields = len(self.dtype.fields) self._converted = {} self._heapoffset = getattr(obj, '_heapoffset', 0) self._heapsize = getattr(obj, '_heapsize', 0) self._gap = getattr(obj, '_gap', 0) self._uint = getattr(obj, '_uint', False) self._col_weakrefs = weakref.WeakSet() self._coldefs = ColDefs(self) # Work around chicken-egg problem. Column.array relies on the # _coldefs attribute to set up ref back to parent FITS_rec; however # in the above line the self._coldefs has not been assigned yet so # this fails. This patches that up... for col in self._coldefs: del col.array col._parent_fits_rec = weakref.ref(self) else: self._init() def _init(self): """Initializes internal attributes specific to FITS-isms.""" self._nfields = 0 self._converted = {} self._heapoffset = 0 self._heapsize = 0 self._col_weakrefs = weakref.WeakSet() self._coldefs = None self._gap = 0 self._uint = False @classmethod def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False): """ Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec` object. .. note:: This was originally part of the ``new_table`` function in the table module but was moved into a class method since most of its functionality always had more to do with initializing a `FITS_rec` object than anything else, and much of it also overlapped with ``FITS_rec._scale_back``. Parameters ---------- columns : sequence of `Column` or a `ColDefs` The columns from which to create the table data. 
If these columns have data arrays attached that data may be used in initializing the new table. Otherwise the input columns will be used as a template for a new table with the requested number of rows. nrows : int Number of rows in the new table. If the input columns have data associated with them, the size of the largest input column is used. Otherwise the default is 0. fill : bool If `True`, will fill all cells with zeros or blanks. If `False`, copy the data from input, undefined cells will still be filled with zeros/blanks. """ if not isinstance(columns, ColDefs): columns = ColDefs(columns) # read the delayed data for column in columns: arr = column.array if isinstance(arr, Delayed): if arr.hdu.data is None: column.array = None else: column.array = _get_recarray_field(arr.hdu.data, arr.field) # Reset columns._arrays (which we may want to just do away with # altogether del columns._arrays # use the largest column shape as the shape of the record if nrows == 0: for arr in columns._arrays: if arr is not None: dim = arr.shape[0] else: dim = 0 if dim > nrows: nrows = dim raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8) raw_data.fill(ord(columns._padding_byte)) data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls) data._character_as_bytes = character_as_bytes # Make sure the data is a listener for changes to the columns columns._add_listener(data) # Previously this assignment was made from hdu.columns, but that's a # bug since if a _TableBaseHDU has a FITS_rec in its .data attribute # the _TableBaseHDU.columns property is actually returned from # .data._coldefs, so this assignment was circular! Don't make that # mistake again. # All of this is an artifact of the fragility of the FITS_rec class, # and that it can't just be initialized by columns... data._coldefs = columns # If fill is True we don't copy anything from the column arrays. We're # just using them as a template, and returning a table filled with # zeros/blanks if fill: return data # Otherwise we have to fill the recarray with data from the input # columns for idx, column in enumerate(columns): # For each column in the ColDef object, determine the number of # rows in that column. This will be either the number of rows in # the ndarray associated with the column, or the number of rows # given in the call to this function, which ever is smaller. If # the input FILL argument is true, the number of rows is set to # zero so that no data is copied from the original input data. arr = column.array if arr is None: array_size = 0 else: array_size = len(arr) n = min(array_size, nrows) # TODO: At least *some* of this logic is mostly redundant with the # _convert_foo methods in this class; see if we can eliminate some # of that duplication. if not n: # The input column had an empty array, so just use the fill # value continue field = _get_recarray_field(data, idx) name = column.name fitsformat = column.format recformat = fitsformat.recformat outarr = field[:n] inarr = arr[:n] if isinstance(recformat, _FormatX): # Data is a bit array if inarr.shape[-1] == recformat.repeat: _wrapx(inarr, outarr, recformat.repeat) continue elif isinstance(recformat, _FormatP): data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows)) continue # TODO: Find a better way of determining that the column is meant # to be FITS L formatted elif recformat[-2:] == FITS2NUMPY['L'] and inarr.dtype == bool: # column is boolean # The raw data field should be filled with either 'T' or 'F' # (not 0). 
Use 'F' as a default field[:] = ord('F') # Also save the original boolean array in data._converted so # that it doesn't have to be re-converted converted = np.zeros(field.shape, dtype=bool) converted[:n] = inarr data._cache_field(name, converted) # TODO: Maybe this step isn't necessary at all if _scale_back # will handle it? inarr = np.where(inarr == np.False_, ord('F'), ord('T')) elif (columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints): # Temporary hack... bzero = column.bzero converted = np.zeros(field.shape, dtype=inarr.dtype) converted[:n] = inarr data._cache_field(name, converted) if n < nrows: # Pre-scale rows below the input data field[n:] = -bzero inarr = inarr - bzero elif isinstance(columns, _AsciiColDefs): # Regardless whether the format is character or numeric, if the # input array contains characters then it's already in the raw # format for ASCII tables if fitsformat._pseudo_logical: # Hack to support converting from 8-bit T/F characters # Normally the column array is a chararray of 1 character # strings, but we need to view it as a normal ndarray of # 8-bit ints to fill it with ASCII codes for 'T' and 'F' outarr = field.view(np.uint8, np.ndarray)[:n] elif arr.dtype.kind not in ('S', 'U'): # Set up views of numeric columns with the appropriate # numeric dtype # Fill with the appropriate blanks for the column format data._cache_field(name, np.zeros(nrows, dtype=arr.dtype)) outarr = data._converted[name][:n] outarr[:] = inarr continue if inarr.shape != outarr.shape: if (inarr.dtype.kind == outarr.dtype.kind and inarr.dtype.kind in ('U', 'S') and inarr.dtype != outarr.dtype): inarr_rowsize = inarr[0].size inarr = inarr.flatten().view(outarr.dtype) # This is a special case to handle input arrays with # non-trivial TDIMn. # By design each row of the outarray is 1-D, while each row of # the input array may be n-D if outarr.ndim > 1: # The normal case where the first dimension is the rows inarr_rowsize = inarr[0].size inarr = inarr.reshape(n, inarr_rowsize) outarr[:, :inarr_rowsize] = inarr else: # Special case for strings where the out array only has one # dimension (the second dimension is rolled up into the # strings outarr[:n] = inarr.ravel() else: outarr[:] = inarr # Now replace the original column array references with the new # fields # This is required to prevent the issue reported in # https://github.com/spacetelescope/PyFITS/issues/99 for idx in range(len(columns)): columns._arrays[idx] = data.field(idx) return data def __repr__(self): # Force use of the normal ndarray repr (rather than the new # one added for recarray in Numpy 1.10) for backwards compat return np.ndarray.__repr__(self) def __getitem__(self, key): if self._coldefs is None: return super().__getitem__(key) if isinstance(key, str): return self.field(key) # Have to view as a recarray then back as a FITS_rec, otherwise the # circular reference fix/hack in FITS_rec.field() won't preserve # the slice. out = self.view(np.recarray)[key] if type(out) is not np.recarray: # Oops, we got a single element rather than a view. In that case, # return a Record, which has no __getstate__ and is more efficient. 
return self._record_type(self, key) # We got a view; change it back to our class, and add stuff out = out.view(type(self)) out._coldefs = ColDefs(self._coldefs) arrays = [] out._converted = {} for idx, name in enumerate(self._coldefs.names): # # Store the new arrays for the _coldefs object # arrays.append(self._coldefs._arrays[idx][key]) # Ensure that the sliced FITS_rec will view the same scaled # columns as the original; this is one of the few cases where # it is not necessary to use _cache_field() if name in self._converted: dummy = self._converted[name] field = np.ndarray.__getitem__(dummy, key) out._converted[name] = field out._coldefs._arrays = arrays return out def __setitem__(self, key, value): if self._coldefs is None: return super().__setitem__(key, value) if isinstance(key, str): self[key][:] = value return if isinstance(key, slice): end = min(len(self), key.stop or len(self)) end = max(0, end) start = max(0, key.start or 0) end = min(end, start + len(value)) for idx in range(start, end): self.__setitem__(idx, value[idx - start]) return if isinstance(value, FITS_record): for idx in range(self._nfields): self.field(self.names[idx])[key] = value.field(self.names[idx]) elif isinstance(value, (tuple, list, np.void)): if self._nfields == len(value): for idx in range(self._nfields): self.field(idx)[key] = value[idx] else: raise ValueError('Input tuple or list required to have {} ' 'elements.'.format(self._nfields)) else: raise TypeError('Assignment requires a FITS_record, tuple, or ' 'list as input.') def _ipython_key_completions_(self): return self.names def copy(self, order='C'): """ The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to `numpy.copy`. Differences include that it re-views the copied array as self's ndarray subclass, as though it were taking a slice; this means ``__array_finalize__`` is called and the copy shares all the array attributes (including ``._converted``!). So we need to make a deep copy of all those attributes so that the two arrays truly do not share any data. """ new = super().copy(order=order) new.__dict__ = copy.deepcopy(self.__dict__) return new @property def columns(self): """ A user-visible accessor for the coldefs. See https://aeon.stsci.edu/ssb/trac/pyfits/ticket/44 """ return self._coldefs @property def _coldefs(self): # This used to be a normal internal attribute, but it was changed to a # property as a quick and transparent way to work around the reference # leak bug fixed in https://github.com/astropy/astropy/pull/4539 # # See the long comment in the Column.array property for more details # on this. But in short, FITS_rec now has a ._col_weakrefs attribute # which is a WeakSet of weakrefs to each Column in _coldefs. # # So whenever ._coldefs is set we also add each Column in the ColDefs # to the weakrefs set. This is an easy way to find out if a Column has # any references to it external to the FITS_rec (i.e. a user assigned a # column to a variable). If the column is still in _col_weakrefs then # there are other references to it external to this FITS_rec. We use # that information in __del__ to save off copies of the array data # for those columns to their Column.array property before our memory # is freed. 
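        #
        # A rough sketch of that bookkeeping (the names below are
        # illustrative only):
        #
        #     rec._coldefs = coldefs       # setter adds every Column to
        #                                  # rec._col_weakrefs (a WeakSet)
        #     col = rec.columns['FLUX']    # user keeps an external reference
        #     del rec                      # __del__ sees col still alive in
        #                                  # _col_weakrefs and copies its data
        #                                  # into col.array before the
        #                                  # FITS_rec memory is freed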
return self.__dict__.get('_coldefs') @_coldefs.setter def _coldefs(self, cols): self.__dict__['_coldefs'] = cols if isinstance(cols, ColDefs): for col in cols.columns: self._col_weakrefs.add(col) @_coldefs.deleter def _coldefs(self): try: del self.__dict__['_coldefs'] except KeyError as exc: raise AttributeError(exc.args[0]) def __del__(self): try: del self._coldefs if self.dtype.fields is not None: for col in self._col_weakrefs: if col.array is not None: col.array = col.array.copy() # See issues #4690 and #4912 except (AttributeError, TypeError): # pragma: no cover pass @property def names(self): """List of column names.""" if self.dtype.fields: return list(self.dtype.names) elif getattr(self, '_coldefs', None) is not None: return self._coldefs.names else: return None @property def formats(self): """List of column FITS formats.""" if getattr(self, '_coldefs', None) is not None: return self._coldefs.formats return None @property def _raw_itemsize(self): """ Returns the size of row items that would be written to the raw FITS file, taking into account the possibility of unicode columns being compactified. Currently for internal use only. """ if _has_unicode_fields(self): total_itemsize = 0 for field in self.dtype.fields.values(): itemsize = field[0].itemsize if field[0].kind == 'U': itemsize = itemsize // 4 total_itemsize += itemsize return total_itemsize else: # Just return the normal itemsize return self.itemsize def field(self, key): """ A view of a `Column`'s data as an array. """ # NOTE: The *column* index may not be the same as the field index in # the recarray, if the column is a phantom column column = self.columns[key] name = column.name format = column.format if format.dtype.itemsize == 0: warnings.warn( 'Field {!r} has a repeat count of 0 in its format code, ' 'indicating an empty field.'.format(key)) return np.array([], dtype=format.dtype) # If field's base is a FITS_rec, we can run into trouble because it # contains a reference to the ._coldefs object of the original data; # this can lead to a circular reference; see ticket #49 base = self while (isinstance(base, FITS_rec) and isinstance(base.base, np.recarray)): base = base.base # base could still be a FITS_rec in some cases, so take care to # use rec.recarray.field to avoid a potential infinite # recursion field = _get_recarray_field(base, name) if name not in self._converted: recformat = format.recformat # TODO: If we're now passing the column to these subroutines, do we # really need to pass them the recformat? if isinstance(recformat, _FormatP): # for P format converted = self._convert_p(column, field, recformat) else: # Handle all other column data types which are fixed-width # fields converted = self._convert_other(column, field, recformat) # Note: Never assign values directly into the self._converted dict; # always go through self._cache_field; this way self._converted is # only used to store arrays that are not already direct views of # our own data. self._cache_field(name, converted) return converted return self._converted[name] def _cache_field(self, name, field): """ Do not store fields in _converted if one of its bases is self, or if it has a common base with self. This results in a reference cycle that cannot be broken since ndarrays do not participate in cyclic garbage collection. 
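
        A condensed sketch of the check performed below (``_base_chain`` is a
        hypothetical helper that walks ``.base`` references, including the
        array itself)::

            shares_memory = any(b1 is b2
                                for b1 in _base_chain(field)
                                for b2 in _base_chain(self))
            if not shares_memory:
                self._converted[name] = field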
""" base = field while True: self_base = self while True: if self_base is base: return if getattr(self_base, 'base', None) is not None: self_base = self_base.base else: break if getattr(base, 'base', None) is not None: base = base.base else: break self._converted[name] = field def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value): """ Update how the data is formatted depending on changes to column attributes initiated by the user through the `Column` interface. Dispatches column attribute change notifications to individual methods for each attribute ``_update_column_<attr>`` """ method_name = '_update_column_{0}'.format(attr) if hasattr(self, method_name): # Right now this is so we can be lazy and not implement updaters # for every attribute yet--some we may not need at all, TBD getattr(self, method_name)(column, idx, old_value, new_value) def _update_column_name(self, column, idx, old_name, name): """Update the dtype field names when a column name is changed.""" dtype = self.dtype # Updating the names on the dtype should suffice dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1:] def _convert_x(self, field, recformat): """Convert a raw table column to a bit array as specified by the FITS X format. """ dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_) _unwrapx(field, dummy, recformat.repeat) return dummy def _convert_p(self, column, field, recformat): """Convert a raw table column of FITS P or Q format descriptors to a VLA column with the array data returned from the heap. """ dummy = _VLF([None] * len(self), dtype=recformat.dtype) raw_data = self._get_raw_data() if raw_data is None: raise OSError( "Could not find heap data for the {!r} variable-length " "array column.".format(column.name)) for idx in range(len(self)): offset = field[idx, 1] + self._heapoffset count = field[idx, 0] if recformat.dtype == 'a': dt = np.dtype(recformat.dtype + str(1)) arr_len = count * dt.itemsize da = raw_data[offset:offset + arr_len].view(dt) da = np.char.array(da.view(dtype=dt), itemsize=count) dummy[idx] = decode_ascii(da) else: dt = np.dtype(recformat.dtype) arr_len = count * dt.itemsize dummy[idx] = raw_data[offset:offset + arr_len].view(dt) dummy[idx].dtype = dummy[idx].dtype.newbyteorder('>') # Each array in the field may now require additional # scaling depending on the other scaling parameters # TODO: The same scaling parameters apply to every # array in the column so this is currently very slow; we # really only need to check once whether any scaling will # be necessary and skip this step if not # TODO: Test that this works for X format; I don't think # that it does--the recformat variable only applies to the P # format not the X format dummy[idx] = self._convert_other(column, dummy[idx], recformat) return dummy def _convert_ascii(self, column, field): """ Special handling for ASCII table columns to convert columns containing numeric types to actual numeric arrays from the string representation. """ format = column.format recformat = ASCII2NUMPY[format[0]] # if the string = TNULL, return ASCIITNULL nullval = str(column.null).strip().encode('ascii') if len(nullval) > format.width: nullval = nullval[:format.width] # Before using .replace make sure that any trailing bytes in each # column are filled with spaces, and *not*, say, nulls; this causes # functions like replace to potentially leave gibberish bytes in the # array buffer. 
dummy = np.char.ljust(field, format.width) dummy = np.char.replace(dummy, encode_ascii('D'), encode_ascii('E')) null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width)) # Convert all fields equal to the TNULL value (nullval) to empty fields. # TODO: These fields really should be conerted to NaN or something else undefined. # Currently they are converted to empty fields, which are then set to zero. dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy) # always replace empty fields, see https://github.com/astropy/astropy/pull/5394 if nullval != b'': dummy = np.where(np.char.strip(dummy) == b'', null_fill, dummy) try: dummy = np.array(dummy, dtype=recformat) except ValueError as exc: indx = self.names.index(column.name) raise ValueError( '{}; the header may be missing the necessary TNULL{} ' 'keyword or the table contains invalid data'.format( exc, indx + 1)) return dummy def _convert_other(self, column, field, recformat): """Perform conversions on any other fixed-width column data types. This may not perform any conversion at all if it's not necessary, in which case the original column array is returned. """ if isinstance(recformat, _FormatX): # special handling for the X format return self._convert_x(field, recformat) (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \ self._get_scale_factors(column) indx = self.names.index(column.name) # ASCII table, convert strings to numbers # TODO: # For now, check that these are ASCII columns by checking the coldefs # type; in the future all columns (for binary tables, ASCII tables, or # otherwise) should "know" what type they are already and how to handle # converting their data from FITS format to native format and vice # versa... if not _str and isinstance(self._coldefs, _AsciiColDefs): field = self._convert_ascii(column, field) # Test that the dimensions given in dim are sensible; otherwise # display a warning and ignore them if dim: # See if the dimensions already match, if not, make sure the # number items will fit in the specified dimensions if field.ndim > 1: actual_shape = field.shape[1:] if _str: actual_shape = actual_shape + (field.itemsize,) else: actual_shape = field.shape[0] if dim == actual_shape: # The array already has the correct dimensions, so we # ignore dim and don't convert dim = None else: nitems = reduce(operator.mul, dim) if _str: actual_nitems = field.itemsize elif len(field.shape) == 1: # No repeat count in TFORMn, equivalent to 1 actual_nitems = 1 else: actual_nitems = field.shape[1] if nitems > actual_nitems: warnings.warn( 'TDIM{} value {:d} does not fit with the size of ' 'the array items ({:d}). TDIM{:d} will be ignored.' 
.format(indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1)) dim = None # further conversion for both ASCII and binary tables # For now we've made columns responsible for *knowing* whether their # data has been scaled, but we make the FITS_rec class responsible for # actually doing the scaling # TODO: This also needs to be fixed in the effort to make Columns # responsible for scaling their arrays to/from FITS native values if not column.ascii and column.format.p_format: format_code = column.format.p_format else: # TODO: Rather than having this if/else it might be nice if the # ColumnFormat class had an attribute guaranteed to give the format # of actual values in a column regardless of whether the true # format is something like P or Q format_code = column.format.format if (_number and (_scale or _zero) and not column._physical_values): # This is to handle pseudo unsigned ints in table columns # TODO: For now this only really works correctly for binary tables # Should it work for ASCII tables as well? if self._uint: if bzero == 2**15 and format_code == 'I': field = np.array(field, dtype=np.uint16) elif bzero == 2**31 and format_code == 'J': field = np.array(field, dtype=np.uint32) elif bzero == 2**63 and format_code == 'K': field = np.array(field, dtype=np.uint64) bzero64 = np.uint64(2 ** 63) else: field = np.array(field, dtype=np.float64) else: field = np.array(field, dtype=np.float64) if _scale: np.multiply(field, bscale, field) if _zero: if self._uint and format_code == 'K': # There is a chance of overflow, so be careful test_overflow = field.copy() try: test_overflow += bzero64 except OverflowError: warnings.warn( "Overflow detected while applying TZERO{0:d}. " "Returning unscaled data.".format(indx + 1)) else: field = test_overflow else: field += bzero # mark the column as scaled column._physical_values = True elif _bool and field.dtype != bool: field = np.equal(field, ord('T')) elif _str: if not self._character_as_bytes: with suppress(UnicodeDecodeError): field = decode_ascii(field) if dim: # Apply the new field item dimensions nitems = reduce(operator.mul, dim) if field.ndim > 1: field = field[:, :nitems] if _str: fmt = field.dtype.char dtype = ('|{}{}'.format(fmt, dim[-1]), dim[:-1]) field.dtype = dtype else: field.shape = (field.shape[0],) + dim return field def _get_heap_data(self): """ Returns a pointer into the table's raw data to its heap (if present). This is returned as a numpy byte array. """ if self._heapsize: raw_data = self._get_raw_data().view(np.ubyte) heap_end = self._heapoffset + self._heapsize return raw_data[self._heapoffset:heap_end] else: return np.array([], dtype=np.ubyte) def _get_raw_data(self): """ Returns the base array of self that "raw data array" that is the array in the format that it was first read from a file before it was sliced or viewed as a different type in any way. This is determined by walking through the bases until finding one that has at least the same number of bytes as self, plus the heapsize. This may be the immediate .base but is not always. This is used primarily for variable-length array support which needs to be able to find the heap (the raw data *may* be larger than nbytes + heapsize if it contains a gap or padding). May return ``None`` if no array resembling the "raw data" according to the stated criteria can be found. 
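
        A sketch of the layout this walk is looking for (proportions are
        illustrative)::

            |<---- self.nbytes ---->|<- gap ->|<---- self._heapsize ---->|
              fixed-width records     padding   variable-length array heap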
""" raw_data_bytes = self.nbytes + self._heapsize base = self while hasattr(base, 'base') and base.base is not None: base = base.base if hasattr(base, 'nbytes') and base.nbytes >= raw_data_bytes: return base def _get_scale_factors(self, column): """Get all the scaling flags and factors for one column.""" # TODO: Maybe this should be a method/property on Column? Or maybe # it's not really needed at all... _str = column.format.format == 'A' _bool = column.format.format == 'L' _number = not (_bool or _str) bscale = column.bscale bzero = column.bzero _scale = bscale not in ('', None, 1) _zero = bzero not in ('', None, 0) # ensure bscale/bzero are numbers if not _scale: bscale = 1 if not _zero: bzero = 0 # column._dims gives a tuple, rather than column.dim which returns the # original string format code from the FITS header... dim = column._dims return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim) def _scale_back(self, update_heap_pointers=True): """ Update the parent array, using the (latest) scaled array. If ``update_heap_pointers`` is `False`, this will leave all the heap pointers in P/Q columns as they are verbatim--it only makes sense to do this if there is already data on the heap and it can be guaranteed that that data has not been modified, and there is not new data to add to the heap. Currently this is only used as an optimization for CompImageHDU that does its own handling of the heap. """ # Running total for the new heap size heapsize = 0 for indx, name in enumerate(self.dtype.names): column = self._coldefs[indx] recformat = column.format.recformat raw_field = _get_recarray_field(self, indx) # add the location offset of the heap area for each # variable length column if isinstance(recformat, _FormatP): # Irritatingly, this can return a different dtype than just # doing np.dtype(recformat.dtype); but this returns the results # that we want. For example if recformat.dtype is 'a' we want # an array of characters. 
dtype = np.array([], dtype=recformat.dtype).dtype if update_heap_pointers and name in self._converted: # The VLA has potentially been updated, so we need to # update the array descriptors raw_field[:] = 0 # reset npts = [len(arr) for arr in self._converted[name]] raw_field[:len(npts), 0] = npts raw_field[1:, 1] = (np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize) raw_field[:, 1][:] += heapsize heapsize += raw_field[:, 0].sum() * dtype.itemsize # Even if this VLA has not been read or updated, we need to # include the size of its constituent arrays in the heap size # total if isinstance(recformat, _FormatX) and name in self._converted: _wrapx(self._converted[name], raw_field, recformat.repeat) continue _str, _bool, _number, _scale, _zero, bscale, bzero, _ = \ self._get_scale_factors(column) field = self._converted.get(name, raw_field) # conversion for both ASCII and binary tables if _number or _str: if _number and (_scale or _zero) and column._physical_values: dummy = field.copy() if _zero: dummy -= bzero if _scale: dummy /= bscale # This will set the raw values in the recarray back to # their non-physical storage values, so the column should # be mark is not scaled column._physical_values = False elif _str or isinstance(self._coldefs, _AsciiColDefs): dummy = field else: continue # ASCII table, convert numbers to strings if isinstance(self._coldefs, _AsciiColDefs): self._scale_back_ascii(indx, dummy, raw_field) # binary table string column elif isinstance(raw_field, chararray.chararray): self._scale_back_strings(indx, dummy, raw_field) # all other binary table columns else: if len(raw_field) and isinstance(raw_field[0], np.integer): dummy = np.around(dummy) if raw_field.shape == dummy.shape: raw_field[:] = dummy else: # Reshaping the data is necessary in cases where the # TDIMn keyword was used to shape a column's entries # into arrays raw_field[:] = dummy.ravel().view(raw_field.dtype) del dummy # ASCII table does not have Boolean type elif _bool and name in self._converted: choices = (np.array([ord('F')], dtype=np.int8)[0], np.array([ord('T')], dtype=np.int8)[0]) raw_field[:] = np.choose(field, choices) # Store the updated heapsize self._heapsize = heapsize def _scale_back_strings(self, col_idx, input_field, output_field): # There are a few possibilities this has to be able to handle properly # The input_field, which comes from the _converted column is of dtype # 'Un' so that elements read out of the array are normal str # objects (i.e. unicode strings) # # At the other end the *output_field* may also be of type 'S' or of # type 'U'. It will *usually* be of type 'S' because when reading # an existing FITS table the raw data is just ASCII strings, and # represented in Numpy as an S array. However, when a user creates # a new table from scratch, they *might* pass in a column containing # unicode strings (dtype 'U'). Therefore the output_field of the # raw array is actually a unicode array. But we still want to make # sure the data is encodable as ASCII. Later when we write out the # array we use, in the dtype 'U' case, a different write routine # that writes row by row and encodes any 'U' columns to ASCII. 
# If the output_field is non-ASCII we will worry about ASCII encoding # later when writing; otherwise we can do it right here if input_field.dtype.kind == 'U' and output_field.dtype.kind == 'S': try: _ascii_encode(input_field, out=output_field) except _UnicodeArrayEncodeError as exc: raise ValueError( "Could not save column '{0}': Contains characters that " "cannot be encoded as ASCII as required by FITS, starting " "at the index {1!r} of the column, and the index {2} of " "the string at that location.".format( self._coldefs[col_idx].name, exc.index[0] if len(exc.index) == 1 else exc.index, exc.start)) else: # Otherwise go ahead and do a direct copy into--if both are type # 'U' we'll handle encoding later input_field = input_field.flatten().view(output_field.dtype) output_field.flat[:] = input_field # Ensure that blanks at the end of each string are # converted to nulls instead of spaces, see Trac #15 # and #111 _rstrip_inplace(output_field) def _scale_back_ascii(self, col_idx, input_field, output_field): """ Convert internal array values back to ASCII table representation. The ``input_field`` is the internal representation of the values, and the ``output_field`` is the character array representing the ASCII output that will be written. """ starts = self._coldefs.starts[:] spans = self._coldefs.spans format = self._coldefs[col_idx].format # The the index of the "end" column of the record, beyond # which we can't write end = super().field(-1).itemsize starts.append(end + starts[-1]) if col_idx > 0: lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1] else: lead = 0 if lead < 0: warnings.warn('Column {!r} starting point overlaps the previous ' 'column.'.format(col_idx + 1)) trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx] if trail < 0: warnings.warn('Column {!r} ending point overlaps the next ' 'column.'.format(col_idx + 1)) # TODO: It would be nice if these string column formatting # details were left to a specialized class, as is the case # with FormatX and FormatP if 'A' in format: _pc = '{:' else: _pc = '{:>' fmt = ''.join([_pc, format[1:], ASCII2STR[format[0]], '}', (' ' * trail)]) # Even if the format precision is 0, we should output a decimal point # as long as there is space to do so--not including a decimal point in # a float value is discouraged by the FITS Standard trailing_decimal = (format.precision == 0 and format.format in ('F', 'E', 'D')) # not using numarray.strings's num2char because the # result is not allowed to expand (as C/Python does). for jdx, value in enumerate(input_field): value = fmt.format(value) if len(value) > starts[col_idx + 1] - starts[col_idx]: raise ValueError( "Value {!r} does not fit into the output's itemsize of " "{}.".format(value, spans[col_idx])) if trailing_decimal and value[0] == ' ': # We have some extra space in the field for the trailing # decimal point value = value[1:] + '.' output_field[jdx] = value # Replace exponent separator in floating point numbers if 'D' in format: output_field[:] = output_field.replace(b'E', b'D') def _get_recarray_field(array, key): """ Compatibility function for using the recarray base class's field method. This incorporates the legacy functionality of returning string arrays as Numeric-style chararray objects. 
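
    A minimal sketch (the key is illustrative)::

        raw = _get_recarray_field(data, 'NAME')
        # for string columns this is a chararray view, so trailing blanks
        # are stripped automatically when elements are accessed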
""" # Numpy >= 1.10.dev recarray no longer returns chararrays for strings # This is currently needed for backwards-compatibility and for # automatic truncation of trailing whitespace field = np.recarray.field(array, key) if (field.dtype.char in ('S', 'U') and not isinstance(field, chararray.chararray)): field = field.view(chararray.chararray) return field class _UnicodeArrayEncodeError(UnicodeEncodeError): def __init__(self, encoding, object_, start, end, reason, index): super().__init__(encoding, object_, start, end, reason) self.index = index def _ascii_encode(inarray, out=None): """ Takes a unicode array and fills the output string array with the ASCII encodings (if possible) of the elements of the input array. The two arrays must be the same size (though not necessarily the same shape). This is like an inplace version of `np.char.encode` though simpler since it's only limited to ASCII, and hence the size of each character is guaranteed to be 1 byte. If any strings are non-ASCII an UnicodeArrayEncodeError is raised--this is just a `UnicodeEncodeError` with an additional attribute for the index of the item that couldn't be encoded. """ out_dtype = np.dtype(('S{0}'.format(inarray.dtype.itemsize // 4), inarray.dtype.shape)) if out is not None: out = out.view(out_dtype) op_dtypes = [inarray.dtype, out_dtype] op_flags = [['readonly'], ['writeonly', 'allocate']] it = np.nditer([inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=['zerosize_ok']) try: for initem, outitem in it: outitem[...] = initem.item().encode('ascii') except UnicodeEncodeError as exc: index = np.unravel_index(it.iterindex, inarray.shape) raise _UnicodeArrayEncodeError(*(exc.args + (index,))) return it.operands[1] def _has_unicode_fields(array): """ Returns True if any fields in a structured array have Unicode dtype. """ dtypes = (d[0] for d in array.dtype.fields.values()) return any(d.kind == 'U' for d in dtypes)
a5bcb3aa8a77a8850706e8d9d943757cdaf9d1103e07e9a1d89dab7e942cbc38
# Licensed under a 3-clause BSD style license - see LICENSE.rst import re import warnings from collections import defaultdict, OrderedDict import numpy as np from . import Header, Card from ... import units as u from ...coordinates import EarthLocation from ...table import Column from ...time import Time, TimeDelta from ...time.core import BARYCENTRIC_SCALES from ...time.formats import FITS_DEPRECATED_SCALES from ...utils.exceptions import AstropyUserWarning # The following is based on the FITS WCS Paper IV, "Representations of time # coordinates in FITS". # http://adsabs.harvard.edu/abs/2015A%26A...574A..36R # FITS WCS standard specified "4-3" form for non-linear coordinate types TCTYP_RE_TYPE = re.compile(r'(?P<type>[A-Z]+)[-]+') TCTYP_RE_ALGO = re.compile(r'(?P<algo>[A-Z]+)\s*') # FITS Time standard specified time units FITS_TIME_UNIT = ['s', 'd', 'a', 'cy', 'min', 'h', 'yr', 'ta', 'Ba'] # Global time reference coordinate keywords TIME_KEYWORDS = ('TIMESYS', 'MJDREF', 'JDREF', 'DATEREF', 'TREFPOS', 'TREFDIR', 'TIMEUNIT', 'TIMEOFFS', 'OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z', 'OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H', 'DATE', 'DATE-OBS', 'DATE-AVG', 'DATE-BEG', 'DATE-END', 'MJD-OBS', 'MJD-AVG', 'MJD-BEG', 'MJD-END') # Column-specific time override keywords COLUMN_TIME_KEYWORDS = ('TCTYP', 'TCUNI', 'TRPOS') # Column-specific keywords regex COLUMN_TIME_KEYWORD_REGEXP = '({0})[0-9]+'.format( '|'.join(COLUMN_TIME_KEYWORDS)) def is_time_column_keyword(keyword): """ Check if the FITS header keyword is a time column-specific keyword. Parameters ---------- keyword : str FITS keyword. """ return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None # Set astropy time global information GLOBAL_TIME_INFO = {'TIMESYS': ('UTC', 'Default time scale'), 'JDREF': (0.0, 'Time columns are jd = jd1 + jd2'), 'TREFPOS': ('TOPOCENTER', 'Time reference position')} def _verify_global_info(global_info): """ Given the global time reference frame information, verify that each global time coordinate attribute will be given a valid value. Parameters ---------- global_info : dict Global time reference frame information. """ # Translate FITS deprecated scale into astropy scale, or else just convert # to lower case for further checks. global_info['scale'] = FITS_DEPRECATED_SCALES.get(global_info['TIMESYS'], global_info['TIMESYS'].lower()) # Verify global time scale if global_info['scale'] not in Time.SCALES: # 'GPS' and 'LOCAL' are FITS recognized time scale values # but are not supported by astropy. if global_info['scale'] == 'gps': warnings.warn( 'Global time scale (TIMESYS) has a FITS recognized time scale ' 'value "GPS". In Astropy, "GPS" is a time from epoch format ' 'which runs synchronously with TAI; GPS is approximately 19 s ' 'ahead of TAI. Hence, this format will be used.', AstropyUserWarning) # Assume that the values are in GPS format global_info['scale'] = 'tai' global_info['format'] = 'gps' if global_info['scale'] == 'local': warnings.warn( 'Global time scale (TIMESYS) has a FITS recognized time scale ' 'value "LOCAL". However, the standard states that "LOCAL" should be ' 'tied to one of the existing scales because it is intrinsically ' 'unreliable and/or ill-defined. Astropy will thus use the default ' 'global time scale "UTC" instead of "LOCAL".', AstropyUserWarning) # Default scale 'UTC' global_info['scale'] = 'utc' global_info['format'] = None else: raise AssertionError( 'Global time scale (TIMESYS) should have a FITS recognized ' 'time scale value (got {!r}). 
The FITS standard states that ' 'the use of local time scales should be restricted to alternate ' 'coordinates.'.format(global_info['TIMESYS'])) else: # Scale is already set global_info['format'] = None # Check if geocentric global location is specified obs_geo = [global_info[attr] for attr in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z') if attr in global_info] # Location full specification is (X, Y, Z) if len(obs_geo) == 3: global_info['location'] = EarthLocation.from_geocentric(*obs_geo, unit=u.m) else: # Check if geodetic global location is specified (since geocentric failed) # First warn the user if geocentric location is partially specified if obs_geo: warnings.warn( 'The geocentric observatory location {} is not completely ' 'specified (X, Y, Z) and will be ignored.'.format(obs_geo), AstropyUserWarning) # Check geodetic location obs_geo = [global_info[attr] for attr in ('OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H') if attr in global_info] if len(obs_geo) == 3: global_info['location'] = EarthLocation.from_geodetic(*obs_geo) else: # Since both geocentric and geodetic locations are not specified, # location will be None. # Warn the user if geodetic location is partially specified if obs_geo: warnings.warn( 'The geodetic observatory location {} is not completely ' 'specified (lon, lat, alt) and will be ignored.'.format(obs_geo), AstropyUserWarning) global_info['location'] = None # Get global time reference # Keywords are listed in order of precedence, as stated by the standard for key, format_ in (('MJDREF', 'mjd'), ('JDREF', 'jd'), ('DATEREF', 'fits')): if key in global_info: global_info['ref_time'] = {'val': global_info[key], 'format': format_} break else: # If none of the three keywords is present, MJDREF = 0.0 must be assumed global_info['ref_time'] = {'val': 0, 'format': 'mjd'} def _verify_column_info(column_info, global_info): """ Given the column-specific time reference frame information, verify that each column-specific time coordinate attribute has a valid value. Return True if the coordinate column is time, or else return False. Parameters ---------- global_info : dict Global time reference frame information. column_info : dict Column-specific time reference frame override information. """ scale = column_info.get('TCTYP', None) unit = column_info.get('TCUNI', None) location = column_info.get('TRPOS', None) if scale is not None: # Non-linear coordinate types have "4-3" form and are not time coordinates if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]): return False elif scale.lower() in Time.SCALES: column_info['scale'] = scale.lower() column_info['format'] = None elif scale in FITS_DEPRECATED_SCALES.keys(): column_info['scale'] = FITS_DEPRECATED_SCALES[scale] column_info['format'] = None # TCTYPn (scale) = 'TIME' indicates that the column scale is # controlled by the global scale. elif scale == 'TIME': column_info['scale'] = global_info['scale'] column_info['format'] = global_info['format'] elif scale == 'GPS': warnings.warn( 'Table column "{}" has a FITS recognized time scale value "GPS". ' 'In Astropy, "GPS" is a time from epoch format which runs ' 'synchronously with TAI; GPS runs ahead of TAI approximately ' 'by 19 s. Hence, this format will be used.'.format(column_info), AstropyUserWarning) column_info['scale'] = 'tai' column_info['format'] = 'gps' elif scale == 'LOCAL': warnings.warn( 'Table column "{}" has a FITS recognized time scale value "LOCAL". 
' 'However, the standard states that "LOCAL" should be tied to one ' 'of the existing scales because it is intrinsically unreliable ' 'and/or ill-defined. Astropy will thus use the global time scale ' '(TIMESYS) as the default.'. format(column_info), AstropyUserWarning) column_info['scale'] = global_info['scale'] column_info['format'] = global_info['format'] else: # Coordinate type is either an unrecognized local time scale # or a linear coordinate type return False # If TCUNIn is a time unit or TRPOSn is specified, the column is a time # coordinate. This has to be tested since TCTYP (scale) is not specified. elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None: column_info['scale'] = global_info['scale'] column_info['format'] = global_info['format'] # None of the conditions for time coordinate columns is satisfied else: return False # Check if column-specific reference position TRPOSn is specified if location is not None: # Observatory position (location) needs to be specified only # for 'TOPOCENTER'. if location == 'TOPOCENTER': column_info['location'] = global_info['location'] if column_info['location'] is None: warnings.warn( 'Time column reference position "TRPOSn" value is "TOPOCENTER". ' 'However, the observatory position is not properly specified. ' 'The FITS standard does not support this and hence reference ' 'position will be ignored.', AstropyUserWarning) else: column_info['location'] = None # Since TRPOSn is not specified, global reference position is # considered. elif global_info['TREFPOS'] == 'TOPOCENTER': column_info['location'] = global_info['location'] if column_info['location'] is None: warnings.warn( 'Time column reference position "TRPOSn" is not specified. The ' 'default value for it is "TOPOCENTER", but due to unspecified ' 'observatory position, reference position will be ignored.', AstropyUserWarning) else: column_info['location'] = None # Get reference time column_info['ref_time'] = global_info['ref_time'] return True def _get_info_if_time_column(col, global_info): """ Check if a column without corresponding time column keywords in the FITS header represents time or not. If yes, return the time column information needed for its conversion to Time. This is only applicable to the special-case where a column has the name 'TIME' and a time unit. """ # Column with TTYPEn = 'TIME' and lacking any TC*n or time # specific keywords will be controlled by the global keywords. if col.info.name.upper() == 'TIME' and col.info.unit in FITS_TIME_UNIT: column_info = {'scale': global_info['scale'], 'format': global_info['format'], 'ref_time': global_info['ref_time'], 'location': None} if global_info['TREFPOS'] == 'TOPOCENTER': column_info['location'] = global_info['location'] if column_info['location'] is None: warnings.warn( 'Time column "{}" reference position will be ignored ' 'due to unspecified observatory position.'.format(col.info.name), AstropyUserWarning) return column_info return None def _convert_global_time(table, global_info): """ Convert the table metadata for time informational keywords to astropy Time. Parameters ---------- table : `~astropy.table.Table` The table whose time metadata is to be converted. global_info : dict Global time reference frame information. 
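
    A sketch of the effect (the keyword value is illustrative)::

        global_info = {'scale': 'utc', 'DATE-OBS': '2010-01-01T00:00:00'}
        _convert_global_time(table, global_info)
        # table.meta['DATE-OBS'] is now an astropy Time in the 'utc' scale
        # (format 'fits'), provided the keyword was not already in table.meta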
""" # Read in Global Informational keywords as Time for key, value in global_info.items(): # FITS uses a subset of ISO-8601 for DATE-xxx if key.startswith('DATE'): if key not in table.meta: scale = 'utc' if key == 'DATE' else global_info['scale'] try: precision = len(value.split('.')[-1]) if '.' in value else 0 value = Time(value, format='fits', scale=scale, precision=precision) except ValueError: pass table.meta[key] = value # MJD-xxx in MJD according to TIMESYS elif key.startswith('MJD-'): if key not in table.meta: try: value = Time(value, format='mjd', scale=global_info['scale']) except ValueError: pass table.meta[key] = value def _convert_time_column(col, column_info): """ Convert time columns to astropy Time columns. Parameters ---------- col : `~astropy.table.Column` The time coordinate column to be converted to Time. column_info : dict Column-specific time reference frame override information. """ # The code might fail while attempting to read FITS files not written by astropy. try: # ISO-8601 is the only string representation of time in FITS if col.info.dtype.kind in ['S', 'U']: # [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters # from index 20 to the end of string represents the precision precision = max(int(col.info.dtype.str[2:]) - 20, 0) return Time(col, format='fits', scale=column_info['scale'], precision=precision, location=column_info['location']) if column_info['format'] == 'gps': return Time(col, format='gps', location=column_info['location']) # If reference value is 0 for JD or MJD, the column values can be # directly converted to Time, as they are absolute (relative # to a globally accepted zero point). if (column_info['ref_time']['val'] == 0 and column_info['ref_time']['format'] in ['jd', 'mjd']): # (jd1, jd2) where jd = jd1 + jd2 if col.shape[-1] == 2 and col.ndim > 1: return Time(col[..., 0], col[..., 1], scale=column_info['scale'], format=column_info['ref_time']['format'], location=column_info['location']) else: return Time(col, scale=column_info['scale'], format=column_info['ref_time']['format'], location=column_info['location']) # Reference time ref_time = Time(column_info['ref_time']['val'], scale=column_info['scale'], format=column_info['ref_time']['format'], location=column_info['location']) # Elapsed time since reference time if col.shape[-1] == 2 and col.ndim > 1: delta_time = TimeDelta(col[..., 0], col[..., 1]) else: delta_time = TimeDelta(col) return ref_time + delta_time except Exception as err: warnings.warn( 'The exception "{}" was encountered while trying to convert the time ' 'column "{}" to Astropy Time.'.format(err, col.info.name), AstropyUserWarning) return col def fits_to_time(hdr, table): """ Read FITS binary table time columns as `~astropy.time.Time`. This method reads the metadata associated with time coordinates, as stored in a FITS binary table header, converts time columns into `~astropy.time.Time` columns and reads global reference times as `~astropy.time.Time` instances. Parameters ---------- hdr : `~astropy.io.fits.header.Header` FITS Header table : `~astropy.table.Table` The table whose time columns are to be read as Time Returns ------- hdr : `~astropy.io.fits.header.Header` Modified FITS Header (time metadata removed) """ # Set defaults for global time scale, reference, etc. global_info = {'TIMESYS': 'UTC', 'TREFPOS': 'TOPOCENTER'} # Set default dictionary for time columns time_columns = defaultdict(OrderedDict) # Make a "copy" (not just a view) of the input header, since it # may get modified. 
the data is still a "view" (for now) hcopy = hdr.copy(strip=True) # Scan the header for global and column-specific time keywords for key, value, comment in hdr.cards: if key in TIME_KEYWORDS: global_info[key] = value hcopy.remove(key) elif is_time_column_keyword(key): base, idx = re.match(r'([A-Z]+)([0-9]+)', key).groups() time_columns[int(idx)][base] = value hcopy.remove(key) # Verify and get the global time reference frame information _verify_global_info(global_info) _convert_global_time(table, global_info) # Columns with column-specific time (coordinate) keywords if time_columns: for idx, column_info in time_columns.items(): # Check if the column is time coordinate (not spatial) if _verify_column_info(column_info, global_info): colname = table.colnames[idx - 1] # Convert to Time table[colname] = _convert_time_column(table[colname], column_info) # Check for special-cases of time coordinate columns for idx, colname in enumerate(table.colnames): if (idx + 1) not in time_columns: column_info = _get_info_if_time_column(table[colname], global_info) if column_info: table[colname] = _convert_time_column(table[colname], column_info) return hcopy def time_to_fits(table): """ Replace Time columns in a Table with non-mixin columns containing each element as a vector of two doubles (jd1, jd2) and return a FITS header with appropriate time coordinate keywords. jd = jd1 + jd2 represents time in the Julian Date format with high-precision. Parameters ---------- table : `~astropy.table.Table` The table whose Time columns are to be replaced. Returns ------- table : `~astropy.table.Table` The table with replaced Time columns hdr : `~astropy.io.fits.header.Header` Header containing global time reference frame FITS keywords """ # Shallow copy of the input table newtable = table.copy(copy_data=False) # Global time coordinate frame keywords hdr = Header([Card(keyword=key, value=val[0], comment=val[1]) for key, val in GLOBAL_TIME_INFO.items()]) # Store coordinate column-specific metadata newtable.meta['__coordinate_columns__'] = defaultdict(OrderedDict) coord_meta = newtable.meta['__coordinate_columns__'] time_cols = table.columns.isinstance(Time) # Geocentric location location = None for col in time_cols: # By default, Time objects are written in full precision, i.e. we store both # jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for # Time can be stored if the user explicitly chooses to do so. if col.info.serialize_method['fits'] == 'formatted_value': newtable.replace_column(col.info.name, Column(col.value)) continue # The following is necessary to deal with multi-dimensional ``Time`` objects # (i.e. where Time.shape is non-trivial). 
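        # For a Time column of shape (N,), for instance, np.array([jd1, jd2])
        # below has shape (2, N) and the rollaxis turns it into (N, 2), i.e.
        # one (jd1, jd2) pair per row.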
jd12 = np.array([col.jd1, col.jd2]) # Roll the 0th (innermost) axis backwards, until it lies in the last position # (jd12.ndim) jd12 = np.rollaxis(jd12, 0, jd12.ndim) newtable.replace_column(col.info.name, Column(jd12, unit='d')) # Get column position(index) n = table.colnames.index(col.info.name) + 1 # Time column-specific override keywords coord_meta[col.info.name]['coord_type'] = col.scale.upper() coord_meta[col.info.name]['coord_unit'] = 'd' # Time column reference position if getattr(col, 'location') is None: if location is not None: warnings.warn( 'Time Column "{}" has no specified location, but global Time ' 'Position is present, which will be the default for this column ' 'in FITS specification.'.format(col.info.name), AstropyUserWarning) else: coord_meta[col.info.name]['time_ref_pos'] = 'TOPOCENTER' # Compatibility of Time Scales and Reference Positions if col.scale in BARYCENTRIC_SCALES: warnings.warn( 'Earth Location "TOPOCENTER" for Time Column "{}" is incompatabile ' 'with scale "{}".'.format(col.info.name, col.scale.upper()), AstropyUserWarning) if col.location.size > 1: raise ValueError('Vectorized Location of Time Column "{}" cannot be ' 'written, as it is not supported.'.format(col.info.name)) if location is None: # Set global geocentric location location = col.location hdr.extend([Card(keyword='OBSGEO-{}'.format(dim.upper()), value=getattr(location, dim).to_value(u.m)) for dim in ('x', 'y', 'z')]) elif location != col.location: raise ValueError('Multiple Time Columns with different geocentric ' 'observatory locations ({}, {}) encountered.' 'This is not supported by the FITS standard.' .format(location, col.location)) return newtable, hdr
f5ba2049809e4ebe76c0ad048bba0145528f72c7fa5d5a5e0a549ff88400074b
# Licensed under a 3-clause BSD style license - see PYFITS.rst """ Convenience functions ===================== The functions in this module provide shortcuts for some of the most basic operations on FITS files, such as reading and updating the header. They are included directly in the 'astropy.io.fits' namespace so that they can be used like:: astropy.io.fits.getheader(...) These functions are primarily for convenience when working with FITS files in the command-line interpreter. If performing several operations on the same file, such as in a script, it is better to *not* use these functions, as each one must open and re-parse the file. In such cases it is better to use :func:`astropy.io.fits.open` and work directly with the :class:`astropy.io.fits.HDUList` object and underlying HDU objects. Several of the convenience functions, such as `getheader` and `getdata` support special arguments for selecting which extension HDU to use when working with a multi-extension FITS file. There are a few supported argument formats for selecting the extension. See the documentation for `getdata` for an explanation of all the different formats. .. warning:: All arguments to convenience functions other than the filename that are *not* for selecting the extension HDU should be passed in as keyword arguments. This is to avoid ambiguity and conflicts with the extension arguments. For example, to set NAXIS=1 on the Primary HDU: Wrong:: astropy.io.fits.setval('myimage.fits', 'NAXIS', 1) The above example will try to set the NAXIS value on the first extension HDU to blank. That is, the argument '1' is assumed to specify an extension HDU. Right:: astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1) This will set the NAXIS keyword to 1 on the primary HDU (the default). To specify the first extension HDU use:: astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1) This complexity arises out of the attempt to simultaneously support multiple argument formats that were used in past versions of PyFITS. Unfortunately, it is not possible to support all formats without introducing some ambiguity. A future Astropy release may standardize around a single format and officially deprecate the other formats. """ import operator import os import warnings import numpy as np from .diff import FITSDiff, HDUDiff from .file import FILE_MODES, _File from .hdu.base import _BaseHDU, _ValidHDU from .hdu.hdulist import fitsopen, HDUList from .hdu.image import PrimaryHDU, ImageHDU from .hdu.table import BinTableHDU from .header import Header from .util import fileobj_closed, fileobj_name, fileobj_mode, _is_int from ...units import Unit from ...units.format.fits import UnitScaleError from ...units import Quantity from ...utils.exceptions import AstropyUserWarning from ...utils.decorators import deprecated_renamed_argument __all__ = ['getheader', 'getdata', 'getval', 'setval', 'delval', 'writeto', 'append', 'update', 'info', 'tabledump', 'tableload', 'table_to_hdu', 'printdiff'] def getheader(filename, *args, **kwargs): """ Get the header from an extension of a FITS file. Parameters ---------- filename : file path, file object, or file like object File to get header from. If an opened file object, its mode must be one of the following rb, rb+, or ab+). ext, extname, extver The rest of the arguments are for extension specification. See the `getdata` documentation for explanations/examples. kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. 
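
    A minimal usage sketch (the file name is illustrative; extension
    selection works as described for `getdata`)::

        hdr = getheader('in.fits')          # primary HDU header
        hdr = getheader('in.fits', 1)       # first extension HDU
        hdr = getheader('in.fits', 'sci')   # extension with EXTNAME='SCI'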
Returns ------- header : `Header` object """ mode, closed = _get_file_mode(filename) hdulist, extidx = _getext(filename, mode, *args, **kwargs) try: hdu = hdulist[extidx] header = hdu.header finally: hdulist.close(closed=closed) return header def getdata(filename, *args, header=None, lower=None, upper=None, view=None, **kwargs): """ Get the data from an extension of a FITS file (and optionally the header). Parameters ---------- filename : file path, file object, or file like object File to get data from. If opened, mode must be one of the following rb, rb+, or ab+. ext The rest of the arguments are for extension specification. They are flexible and are best illustrated by examples. No extra arguments implies the primary header:: getdata('in.fits') By extension number:: getdata('in.fits', 0) # the primary header getdata('in.fits', 2) # the second extension getdata('in.fits', ext=2) # the second extension By name, i.e., ``EXTNAME`` value (if unique):: getdata('in.fits', 'sci') getdata('in.fits', extname='sci') # equivalent Note ``EXTNAME`` values are not case sensitive By combination of ``EXTNAME`` and EXTVER`` as separate arguments or as a tuple:: getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2 getdata('in.fits', extname='sci', extver=2) # equivalent getdata('in.fits', ('sci', 2)) # equivalent Ambiguous or conflicting specifications will raise an exception:: getdata('in.fits', ext=('sci',1), extname='err', extver=2) header : bool, optional If `True`, return the data and the header of the specified HDU as a tuple. lower, upper : bool, optional If ``lower`` or ``upper`` are `True`, the field names in the returned data object will be converted to lower or upper case, respectively. view : ndarray, optional When given, the data will be returned wrapped in the given ndarray subclass by calling:: data.view(view) kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. Returns ------- array : array, record array or groups data object Type depends on the type of the extension being referenced. If the optional keyword ``header`` is set to `True`, this function will return a (``data``, ``header``) tuple. """ mode, closed = _get_file_mode(filename) hdulist, extidx = _getext(filename, mode, *args, **kwargs) try: hdu = hdulist[extidx] data = hdu.data if data is None and extidx == 0: try: hdu = hdulist[1] data = hdu.data except IndexError: raise IndexError('No data in this HDU.') if data is None: raise IndexError('No data in this HDU.') if header: hdr = hdu.header finally: hdulist.close(closed=closed) # Change case of names if requested trans = None if lower: trans = operator.methodcaller('lower') elif upper: trans = operator.methodcaller('upper') if trans: if data.dtype.names is None: # this data does not have fields return if data.dtype.descr[0][0] == '': # this data does not have fields return data.dtype.names = [trans(n) for n in data.dtype.names] # allow different views into the underlying ndarray. Keep the original # view just in case there is a problem if isinstance(view, type) and issubclass(view, np.ndarray): data = data.view(view) if header: return data, hdr else: return data def getval(filename, keyword, *args, **kwargs): """ Get a keyword's value from a header in a FITS file. Parameters ---------- filename : file path, file object, or file like object Name of the FITS file, or file object (if opened, mode must be one of the following rb, rb+, or ab+). keyword : str Keyword name ext, extname, extver The rest of the arguments are for extension specification. 
See `getdata` for explanations/examples. kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. *Note:* This function automatically specifies ``do_not_scale_image_data = True`` when opening the file so that values can be retrieved from the unmodified header. Returns ------- keyword value : str, int, or float """ if 'do_not_scale_image_data' not in kwargs: kwargs['do_not_scale_image_data'] = True hdr = getheader(filename, *args, **kwargs) return hdr[keyword] def setval(filename, keyword, *args, value=None, comment=None, before=None, after=None, savecomment=False, **kwargs): """ Set a keyword's value from a header in a FITS file. If the keyword already exists, it's value/comment will be updated. If it does not exist, a new card will be created and it will be placed before or after the specified location. If no ``before`` or ``after`` is specified, it will be appended at the end. When updating more than one keyword in a file, this convenience function is a much less efficient approach compared with opening the file for update, modifying the header, and closing the file. Parameters ---------- filename : file path, file object, or file like object Name of the FITS file, or file object If opened, mode must be update (rb+). An opened file object or `~gzip.GzipFile` object will be closed upon return. keyword : str Keyword name value : str, int, float, optional Keyword value (default: `None`, meaning don't modify) comment : str, optional Keyword comment, (default: `None`, meaning don't modify) before : str, int, optional Name of the keyword, or index of the card before which the new card will be placed. The argument ``before`` takes precedence over ``after`` if both are specified (default: `None`). after : str, int, optional Name of the keyword, or index of the card after which the new card will be placed. (default: `None`). savecomment : bool, optional When `True`, preserve the current comment for an existing keyword. The argument ``savecomment`` takes precedence over ``comment`` if both specified. If ``comment`` is not specified then the current comment will automatically be preserved (default: `False`). ext, extname, extver The rest of the arguments are for extension specification. See `getdata` for explanations/examples. kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. *Note:* This function automatically specifies ``do_not_scale_image_data = True`` when opening the file so that values can be retrieved from the unmodified header. """ if 'do_not_scale_image_data' not in kwargs: kwargs['do_not_scale_image_data'] = True closed = fileobj_closed(filename) hdulist, extidx = _getext(filename, 'update', *args, **kwargs) try: if keyword in hdulist[extidx].header and savecomment: comment = None hdulist[extidx].header.set(keyword, value, comment, before, after) finally: hdulist.close(closed=closed) def delval(filename, keyword, *args, **kwargs): """ Delete all instances of keyword from a header in a FITS file. Parameters ---------- filename : file path, file object, or file like object Name of the FITS file, or file object If opened, mode must be update (rb+). An opened file object or `~gzip.GzipFile` object will be closed upon return. keyword : str, int Keyword name or index ext, extname, extver The rest of the arguments are for extension specification. See `getdata` for explanations/examples. kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. 
*Note:* This function automatically specifies ``do_not_scale_image_data = True`` when opening the file so that values can be retrieved from the unmodified header. """ if 'do_not_scale_image_data' not in kwargs: kwargs['do_not_scale_image_data'] = True closed = fileobj_closed(filename) hdulist, extidx = _getext(filename, 'update', *args, **kwargs) try: del hdulist[extidx].header[keyword] finally: hdulist.close(closed=closed) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def writeto(filename, data, header=None, output_verify='exception', overwrite=False, checksum=False): """ Create a new FITS file using the supplied data/header. Parameters ---------- filename : file path, file object, or file like object File to write to. If opened, must be opened in a writeable binary mode such as 'wb' or 'ab+'. data : array, record array, or groups data object data to write to the new file header : `Header` object, optional the header associated with ``data``. If `None`, a header of the appropriate type is created for the supplied data. This argument is optional. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. checksum : bool, optional If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the headers of all HDU's written to the file. """ hdu = _makehdu(data, header) if hdu.is_image and not isinstance(hdu, PrimaryHDU): hdu = PrimaryHDU(data, header=header) hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify, checksum=checksum) def table_to_hdu(table, character_as_bytes=False): """ Convert an `~astropy.table.Table` object to a FITS `~astropy.io.fits.BinTableHDU`. Parameters ---------- table : astropy.table.Table The table to convert. character_as_bytes : bool Whether to return bytes for string columns when accessed from the HDU. By default this is `False` and (unicode) strings are returned, but for large tables this may use up a lot of memory. Returns ------- table_hdu : `~astropy.io.fits.BinTableHDU` The FITS binary table HDU. """ # Avoid circular imports from .connect import is_column_keyword, REMOVE_KEYWORDS # Header to store Time related metadata hdr = None # Not all tables with mixin columns are supported if table.has_mixin_columns: # Import is done here, in order to avoid it at build time as erfa is not # yet available then. 
from ...table.column import BaseColumn, Column from ...time import Time from .fitstime import time_to_fits # Only those columns which are instances of BaseColumn, Quantity or Time can # be written unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time)) if unsupported_cols: unsupported_names = [col.info.name for col in unsupported_cols] raise ValueError('cannot write table with mixin column(s) {0}' .format(unsupported_names)) time_cols = table.columns.isinstance(Time) if time_cols: table, hdr = time_to_fits(table) # Create a new HDU object if table.masked: # float column's default mask value needs to be Nan for column in table.columns.values(): fill_value = column.get_fill_value() if column.dtype.kind == 'f' and np.allclose(fill_value, 1e20): column.set_fill_value(np.nan) # TODO: it might be better to construct the FITS table directly from # the Table columns, rather than go via a structured array. table_hdu = BinTableHDU.from_columns(np.array(table.filled()), header=hdr, character_as_bytes=True) for col in table_hdu.columns: # Binary FITS tables support TNULL *only* for integer data columns # TODO: Determine a schema for handling non-integer masked columns # in FITS (if at all possible) int_formats = ('B', 'I', 'J', 'K') if not (col.format in int_formats or col.format.p_format in int_formats): continue # The astype is necessary because if the string column is less # than one character, the fill value will be N/A by default which # is too long, and so no values will get masked. fill_value = table[col.name].get_fill_value() col.null = fill_value.astype(table[col.name].dtype) else: table_hdu = BinTableHDU.from_columns(np.array(table.filled()), header=hdr, character_as_bytes=character_as_bytes) # Set units for output HDU for col in table_hdu.columns: unit = table[col.name].unit if unit is not None: try: col.unit = unit.to_string(format='fits') except UnitScaleError: scale = unit.scale raise UnitScaleError( "The column '{0}' could not be stored in FITS format " "because it has a scale '({1})' that " "is not recognized by the FITS standard. Either scale " "the data or change the units.".format(col.name, str(scale))) except ValueError: warnings.warn( "The unit '{0}' could not be saved to FITS format".format( unit.to_string()), AstropyUserWarning) # Try creating a Unit to issue a warning if the unit is not FITS compliant Unit(col.unit, format='fits', parse_strict='warn') # Column-specific override keywords for coordinate columns coord_meta = table.meta.pop('__coordinate_columns__', {}) for col_name, col_info in coord_meta.items(): col = table_hdu.columns[col_name] # Set the column coordinate attributes from data saved earlier. # Note: have to set all three, even if we have no data. 
for attr in 'coord_type', 'coord_unit', 'time_ref_pos': setattr(col, attr, col_info.get(attr, None)) for key, value in table.meta.items(): if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS: warnings.warn( "Meta-data keyword {0} will be ignored since it conflicts " "with a FITS reserved keyword".format(key), AstropyUserWarning) # Convert to FITS format if key == 'comments': key = 'comment' if isinstance(value, list): for item in value: try: table_hdu.header.append((key, item)) except ValueError: warnings.warn( "Attribute `{0}` of type {1} cannot be added to " "FITS Header - skipping".format(key, type(value)), AstropyUserWarning) else: try: table_hdu.header[key] = value except ValueError: warnings.warn( "Attribute `{0}` of type {1} cannot be added to FITS " "Header - skipping".format(key, type(value)), AstropyUserWarning) return table_hdu def append(filename, data, header=None, checksum=False, verify=True, **kwargs): """ Append the header/data to FITS file if filename exists, create if not. If only ``data`` is supplied, a minimal header is created. Parameters ---------- filename : file path, file object, or file like object File to write to. If opened, must be opened for update (rb+) unless it is a new file, then it must be opened for append (ab+). A file or `~gzip.GzipFile` object opened for update will be closed after return. data : array, table, or group data object the new data used for appending header : `Header` object, optional The header associated with ``data``. If `None`, an appropriate header will be created for the data object supplied. checksum : bool, optional When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header of the HDU when written to the file. verify : bool, optional When `True`, the existing FITS file will be read in to verify it for correctness before appending. When `False`, content is simply appended to the end of the file. Setting ``verify`` to `False` can be much faster. kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. """ name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename) if noexist_or_empty: # # The input file or file like object either doesn't exits or is # empty. Use the writeto convenience function to write the # output to the empty object. # writeto(filename, data, header, checksum=checksum, **kwargs) else: hdu = _makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) if verify or not closed: f = fitsopen(filename, mode='append') try: f.append(hdu) # Set a flag in the HDU so that only this HDU gets a checksum # when writing the file. hdu._output_checksum = checksum finally: f.close(closed=closed) else: f = _File(filename, mode='append') try: hdu._output_checksum = checksum hdu._writeto(f) finally: f.close() def update(filename, data, *args, **kwargs): """ Update the specified extension with the input data/header. Parameters ---------- filename : file path, file object, or file like object File to update. If opened, mode must be update (rb+). An opened file object or `~gzip.GzipFile` object will be closed upon return. data : array, table, or group data object the new data used for updating header : `Header` object, optional The header associated with ``data``. If `None`, an appropriate header will be created for the data object supplied. ext, extname, extver The rest of the arguments are flexible: the 3rd argument can be the header associated with the data. 
If the 3rd argument is not a `Header`, it (and other positional arguments) are assumed to be the extension specification(s). Header and extension specs can also be keyword arguments. For example:: update(file, dat, hdr, 'sci') # update the 'sci' extension update(file, dat, 3) # update the 3rd extension update(file, dat, hdr, 3) # update the 3rd extension update(file, dat, 'sci', 2) # update the 2nd SCI extension update(file, dat, 3, header=hdr) # update the 3rd extension update(file, dat, header=hdr, ext=5) # update the 5th extension kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. """ # The arguments to this function are a bit trickier to deal with than others # in this module, since the documentation has promised that the header # argument can be an optional positional argument. if args and isinstance(args[0], Header): header = args[0] args = args[1:] else: header = None # The header can also be a keyword argument--if both are provided the # keyword takes precedence header = kwargs.pop('header', header) new_hdu = _makehdu(data, header) closed = fileobj_closed(filename) hdulist, _ext = _getext(filename, 'update', *args, **kwargs) try: hdulist[_ext] = new_hdu finally: hdulist.close(closed=closed) def info(filename, output=None, **kwargs): """ Print the summary information on a FITS file. This includes the name, type, length of header, data shape and type for each extension. Parameters ---------- filename : file path, file object, or file like object FITS file to obtain info from. If opened, mode must be one of the following: rb, rb+, or ab+ (i.e. the file must be readable). output : file, bool, optional A file-like object to write the output to. If ``False``, does not output to a file and instead returns a list of tuples representing the HDU info. Writes to ``sys.stdout`` by default. kwargs Any additional keyword arguments to be passed to `astropy.io.fits.open`. *Note:* This function sets ``ignore_missing_end=True`` by default. """ mode, closed = _get_file_mode(filename, default='readonly') # Set the default value for the ignore_missing_end parameter if 'ignore_missing_end' not in kwargs: kwargs['ignore_missing_end'] = True f = fitsopen(filename, mode=mode, **kwargs) try: ret = f.info(output=output) finally: if closed: f.close() return ret def printdiff(inputa, inputb, *args, **kwargs): """ Compare two parts of a FITS file, including entire FITS files, FITS `HDUList` objects and FITS ``HDU`` objects. Parameters ---------- inputa : str, `HDUList` object, or ``HDU`` object The filename of a FITS file, `HDUList`, or ``HDU`` object to compare to ``inputb``. inputb : str, `HDUList` object, or ``HDU`` object The filename of a FITS file, `HDUList`, or ``HDU`` object to compare to ``inputa``. ext, extname, extver Additional positional arguments are for extension specification if your inputs are string filenames (will not work if ``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects). They are flexible and are best illustrated by examples. In addition to using these arguments positionally you can directly call the keyword parameters ``ext``, ``extname``. By extension number:: printdiff('inA.fits', 'inB.fits', 0) # the primary HDU printdiff('inA.fits', 'inB.fits', 2) # the second extension printdiff('inA.fits', 'inB.fits', ext=2) # the second extension By name, i.e., ``EXTNAME`` value (if unique). 
``EXTNAME`` values are not case sensitive: printdiff('inA.fits', 'inB.fits', 'sci') printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent By combination of ``EXTNAME`` and ``EXTVER`` as separate arguments or as a tuple:: printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI' # & EXTVER=2 printdiff('inA.fits', 'inB.fits', extname='sci', extver=2) # equivalent printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent Ambiguous or conflicting specifications will raise an exception:: printdiff('inA.fits', 'inB.fits', ext=('sci', 1), extname='err', extver=2) kwargs Any additional keyword arguments to be passed to `~astropy.io.fits.FITSDiff`. Notes ----- The primary use for the `printdiff` function is to allow quick print out of a FITS difference report and will write to ``sys.stdout``. To save the diff report to a file please use `~astropy.io.fits.FITSDiff` directly. """ # Pop extension keywords extension = {key: kwargs.pop(key) for key in ['ext', 'extname', 'extver'] if key in kwargs} has_extensions = args or extension if isinstance(inputa, str) and has_extensions: # Use handy _getext to interpret any ext keywords, but # will need to close a if fails modea, closeda = _get_file_mode(inputa) modeb, closedb = _get_file_mode(inputb) hdulista, extidxa = _getext(inputa, modea, *args, **extension) # Have to close a if b doesn't make it try: hdulistb, extidxb = _getext(inputb, modeb, *args, **extension) except Exception: hdulista.close(closed=closeda) raise try: hdua = hdulista[extidxa] hdub = hdulistb[extidxb] # See below print for note print(HDUDiff(hdua, hdub, **kwargs).report()) finally: hdulista.close(closed=closeda) hdulistb.close(closed=closedb) # If input is not a string, can feed HDU objects or HDUList directly, # but can't currently handle extensions elif isinstance(inputa, _ValidHDU) and has_extensions: raise ValueError("Cannot use extension keywords when providing an " "HDU object.") elif isinstance(inputa, _ValidHDU) and not has_extensions: print(HDUDiff(inputa, inputb, **kwargs).report()) elif isinstance(inputa, HDUList) and has_extensions: raise NotImplementedError("Extension specification with HDUList " "objects not implemented.") # This function is EXCLUSIVELY for printing the diff report to screen # in a one-liner call, hence the use of print instead of logging else: print(FITSDiff(inputa, inputb, **kwargs).report()) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1, overwrite=False): """ Dump a table HDU to a file in ASCII format. The table may be dumped in three separate files, one containing column definitions, one containing header parameters, and one for table data. Parameters ---------- filename : file path, file object or file-like object Input fits file. datafile : file path, file object or file-like object, optional Output data file. The default is the root name of the input fits file appended with an underscore, followed by the extension number (ext), followed by the extension ``.txt``. cdfile : file path, file object or file-like object, optional Output column definitions file. The default is `None`, no column definitions output is produced. hfile : file path, file object or file-like object, optional Output header parameters file. The default is `None`, no header parameters output is produced. ext : int The number of the extension containing the table HDU to be dumped. overwrite : bool, optional If ``True``, overwrite the output file if it exists. 
Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. Notes ----- The primary use for the `tabledump` function is to allow editing in a standard text editor of the table data and parameters. The `tableload` function can be used to reassemble the table from the three ASCII files. """ # allow file object to already be opened in any of the valid modes # and leave the file in the same state (opened or closed) as when # the function was called mode, closed = _get_file_mode(filename, default='readonly') f = fitsopen(filename, mode=mode) # Create the default data file name if one was not provided try: if not datafile: root, tail = os.path.splitext(f._file.name) datafile = root + '_' + repr(ext) + '.txt' # Dump the data from the HDU to the files f[ext].dump(datafile, cdfile, hfile, overwrite) finally: if closed: f.close() if isinstance(tabledump.__doc__, str): tabledump.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ') def tableload(datafile, cdfile, hfile=None): """ Create a table from the input ASCII files. The input is from up to three separate files, one containing column definitions, one containing header parameters, and one containing column data. The header parameters file is not required. When the header parameters file is absent a minimal header is constructed. Parameters ---------- datafile : file path, file object or file-like object Input data file containing the table data in ASCII format. cdfile : file path, file object or file-like object Input column definition file containing the names, formats, display formats, physical units, multidimensional array dimensions, undefined values, scale factors, and offsets associated with the columns in the table. hfile : file path, file object or file-like object, optional Input parameter definition file containing the header parameter definitions to be associated with the table. If `None`, a minimal header is constructed. Notes ----- The primary use for the `tableload` function is to allow the input of ASCII data that was edited in a standard text editor of the table data and parameters. The tabledump function can be used to create the initial ASCII files. """ return BinTableHDU.load(datafile, cdfile, hfile, replace=True) if isinstance(tableload.__doc__, str): tableload.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ') def _getext(filename, mode, *args, ext=None, extname=None, extver=None, **kwargs): """ Open the input file, return the `HDUList` and the extension. This supports several different styles of extension selection. See the :func:`getdata()` documentation for the different possibilities. """ err_msg = ('Redundant/conflicting extension arguments(s): {}'.format( {'args': args, 'ext': ext, 'extname': extname, 'extver': extver})) # This code would be much simpler if just one way of specifying an # extension were picked. But now we need to support all possible ways for # the time being. 
if len(args) == 1: # Must be either an extension number, an extension name, or an # (extname, extver) tuple if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2): if ext is not None or extname is not None or extver is not None: raise TypeError(err_msg) ext = args[0] elif isinstance(args[0], str): # The first arg is an extension name; it could still be valid # to provide an extver kwarg if ext is not None or extname is not None: raise TypeError(err_msg) extname = args[0] else: # Take whatever we have as the ext argument; we'll validate it # below ext = args[0] elif len(args) == 2: # Must be an extname and extver if ext is not None or extname is not None or extver is not None: raise TypeError(err_msg) extname = args[0] extver = args[1] elif len(args) > 2: raise TypeError('Too many positional arguments.') if (ext is not None and not (_is_int(ext) or (isinstance(ext, tuple) and len(ext) == 2 and isinstance(ext[0], str) and _is_int(ext[1])))): raise ValueError( 'The ext keyword must be either an extension number ' '(zero-indexed) or a (extname, extver) tuple.') if extname is not None and not isinstance(extname, str): raise ValueError('The extname argument must be a string.') if extver is not None and not _is_int(extver): raise ValueError('The extver argument must be an integer.') if ext is None and extname is None and extver is None: ext = 0 elif ext is not None and (extname is not None or extver is not None): raise TypeError(err_msg) elif extname: if extver: ext = (extname, extver) else: ext = (extname, 1) elif extver and extname is None: raise TypeError('extver alone cannot specify an extension.') hdulist = fitsopen(filename, mode=mode, **kwargs) return hdulist, ext def _makehdu(data, header): if header is None: header = Header() hdu = _BaseHDU(data, header) if hdu.__class__ in (_BaseHDU, _ValidHDU): # The HDU type was unrecognized, possibly due to a # nonexistent/incomplete header if ((isinstance(data, np.ndarray) and data.dtype.fields is not None) or isinstance(data, np.recarray)): hdu = BinTableHDU(data, header=header) elif isinstance(data, np.ndarray): hdu = ImageHDU(data, header=header) else: raise KeyError('Data must be a numpy array.') return hdu def _stat_filename_or_fileobj(filename): closed = fileobj_closed(filename) name = fileobj_name(filename) or '' try: loc = filename.tell() except AttributeError: loc = 0 noexist_or_empty = ((name and (not os.path.exists(name) or (os.path.getsize(name) == 0))) or (not name and loc == 0)) return name, closed, noexist_or_empty def _get_file_mode(filename, default='readonly'): """ Allow file object to already be opened in any of the valid modes and and leave the file in the same state (opened or closed) as when the function was called. """ mode = default closed = fileobj_closed(filename) fmode = fileobj_mode(filename) if fmode is not None: mode = FILE_MODES.get(fmode) if mode is None: raise OSError( "File mode of the input file object ({!r}) cannot be used to " "read/write FITS files.".format(fmode)) return mode, closed
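# Illustrative usage of the convenience functions above (a minimal sketch;
# the filenames and keyword values are hypothetical and only demonstrate the
# calling conventions described in the docstrings):
#
#     from astropy.io import fits
#     from astropy.table import Table
#
#     # Read data and header from the first extension HDU in one call
#     data, hdr = fits.getdata('example.fits', ext=1, header=True)
#
#     # Update a single keyword on the primary HDU without rewriting the file
#     fits.setval('example.fits', 'OBSERVER', value='E. Hubble')
#
#     # Convert an astropy Table to a binary table HDU
#     hdu = fits.table_to_hdu(Table({'wave': [1.0, 2.0], 'flux': [3.0, 4.0]}))
#
#     # Print a quick diff report of two files to stdout
#     fits.printdiff('example.fits', 'example2.fits')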
b526a1e03c0ebf94c145ec1dd5ad900f469f52512be1d1cc411dea200698fa37
# Licensed under a 3-clause BSD style license - see PYFITS.rst import os from distutils.core import Extension from glob import glob from astropy_helpers import setup_helpers from astropy_helpers.distutils_helpers import get_distutils_build_option def _get_compression_extension(): # 'numpy' will be replaced with the proper path to the numpy includes cfg = setup_helpers.DistutilsExtensionArgs() cfg['include_dirs'].append('numpy') cfg['sources'].append(os.path.join(os.path.dirname(__file__), 'src', 'compressionmodule.c')) if not setup_helpers.use_system_library('cfitsio'): if setup_helpers.get_compiler_option() == 'msvc': # These come from the CFITSIO vcc makefile, except the last # which ensures on windows we do not include unistd.h (in regular # compilation of cfitsio, an empty file would be generated) cfg['extra_compile_args'].extend( ['/D', '"WIN32"', '/D', '"_WINDOWS"', '/D', '"_MBCS"', '/D', '"_USRDLL"', '/D', '"_CRT_SECURE_NO_DEPRECATE"', '/D', '"FF_NO_UNISTD_H"']) else: cfg['extra_compile_args'].extend([ '-Wno-declaration-after-statement' ]) if not get_distutils_build_option('debug'): # these switches are to silence warnings from compiling CFITSIO # For full silencing, some are added that only are used in # later versions of gcc (versions approximate; see #6474) cfg['extra_compile_args'].extend([ '-Wno-strict-prototypes', '-Wno-unused', '-Wno-uninitialized', '-Wno-unused-result', # gcc >~4.8 '-Wno-misleading-indentation', # gcc >~7.2 '-Wno-format-overflow', # gcc >~7.2 ]) cfitsio_lib_path = os.path.join('cextern', 'cfitsio', 'lib') cfitsio_zlib_path = os.path.join('cextern', 'cfitsio', 'zlib') cfitsio_files = glob(os.path.join(cfitsio_lib_path, '*.c')) cfitsio_zlib_files = glob(os.path.join(cfitsio_zlib_path, '*.c')) cfg['include_dirs'].append(cfitsio_lib_path) cfg['include_dirs'].append(cfitsio_zlib_path) cfg['sources'].extend(cfitsio_files) cfg['sources'].extend(cfitsio_zlib_files) else: cfg.update(setup_helpers.pkg_config(['cfitsio'], ['cfitsio'])) return Extension('astropy.io.fits.compression', **cfg) def get_extensions(): return [_get_compression_extension()] def get_package_data(): # Installs the testing data files return { 'astropy.io.fits.tests': [os.path.join('data', '*.fits')]} def get_external_libraries(): return ['cfitsio']
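# A quick way to sanity-check the extension configuration assembled above (a
# minimal sketch, assuming it is run from an astropy source checkout where
# astropy_helpers and the bundled cfitsio sources are available; the normal
# build instead calls get_extensions() through the top-level setup machinery):
#
#     ext = _get_compression_extension()
#     print(ext.name)           # astropy.io.fits.compression
#     print(ext.sources[0])     # path ending in src/compressionmodule.c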
d5962d0bfcd2c2036e9f6e9bfe01195f204e7783a0aa70a83c8db123083de1f3
""" This module is for functions that do tricky things with Numpy arrays and dtypes that are not normally supported in Numpy (but can work in limited cases relevant to FITS) or that otherwise require workarounds. """ def realign_dtype(dtype, offsets): """ Given a Numpy struct dtype object an a list of integer offsets, with one offset per field in the dtype, returns a new dtype where each field has the given offset. All offsets must be non-negative integers, but otherwise have no restrictions, and may overlap, per the usual rules for creating struct dtypes. The new dtype will have an itemsize equal to the offset of the right-most field plus the width of that field. One restriction of this function is that it must not be used with object arrays--incorrect offsets may lead to invalid pointers in the arrays. However, this function is really only meant for use by astropy.io.fits and object arrays are not supported for FITS data anyhow. This function is used primarily to get around a shortcoming in Numpy that it is currently impossible to create dtypes with arbitrary offsets, *and* that have zero-width fields. Both of these features are needed for full FITS support. However, this will be fixed in a future version of Numpy at which point use of this hack can be deprecated. See https://github.com/numpy/numpy/pull/6430 """ # Previously this was implemented in C, but then I realized that the C # version is not needed--the workaround is to use dtype.__setstate__ # Note: There is a comment in the Numpy source code (see # https://github.com/numpy/numpy/blob/v1.10.1/numpy/core/src/multiarray/descriptor.c#L2226) # that this may be changed at some point. But hopefully by then the fixes # in #6430 will be implemented, making this hack unnecessary to begin with. cls, args, state = dtype.__reduce__() names, fields = state[3:5] fields = fields.copy() itemsize = 0 # We will re-determine the itemsize based on the type # of the field with the largest (offset + itemsize) if fields is None or len(offsets) != len(names): raise ValueError( "Dtype must be a structured dtype, and length of offsets list " "must be the same as the number of fields.") for name, offset in zip(names, offsets): field = fields[name] itemsize = max(itemsize, offset + field[0].itemsize) if offset != field[1]: fields[name] = (field[0], offset) new_typespec = '|V{0}'.format(itemsize) new_state = state[:4] + (fields, itemsize) + state[6:] new_dtype = cls(new_typespec, *args[1:]) new_dtype.__setstate__(new_state) return new_dtype
ef448ddff2d4153d7713fa0e608603c7d0a9d7ee53a818017c0be75676bfcc60
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import itertools import io import mmap import operator import os import platform import signal import sys import tempfile import textwrap import threading import warnings import weakref from contextlib import contextmanager, suppress from ...utils import data from distutils.version import LooseVersion import numpy as np from ...utils import wraps from ...utils.exceptions import AstropyUserWarning cmp = lambda a, b: (a > b) - (a < b) all_integer_types = (int, np.integer) class NotifierMixin: """ Mixin class that provides services by which objects can register listeners to changes on that object. All methods provided by this class are underscored, since this is intended for internal use to communicate between classes in a generic way, and is not machinery that should be exposed to users of the classes involved. Use the ``_add_listener`` method to register a listener on an instance of the notifier. This registers the listener with a weak reference, so if no other references to the listener exist it is automatically dropped from the list and does not need to be manually removed. Call the ``_notify`` method on the notifier to update all listeners upon changes. ``_notify('change_type', *args, **kwargs)`` results in calling ``listener._update_change_type(*args, **kwargs)`` on all listeners subscribed to that notifier. If a particular listener does not have the appropriate update method it is ignored. Examples -------- >>> class Widget(NotifierMixin): ... state = 1 ... def __init__(self, name): ... self.name = name ... def update_state(self): ... self.state += 1 ... self._notify('widget_state_changed', self) ... >>> class WidgetListener: ... def _update_widget_state_changed(self, widget): ... print('Widget {0} changed state to {1}'.format( ... widget.name, widget.state)) ... >>> widget = Widget('fred') >>> listener = WidgetListener() >>> widget._add_listener(listener) >>> widget.update_state() Widget fred changed state to 2 """ _listeners = None def _add_listener(self, listener): """ Add an object to the list of listeners to notify of changes to this object. This adds a weakref to the list of listeners that is removed from the listeners list when the listener has no other references to it. """ if self._listeners is None: self._listeners = weakref.WeakValueDictionary() self._listeners[id(listener)] = listener def _remove_listener(self, listener): """ Removes the specified listener from the listeners list. This relies on object identity (i.e. the ``is`` operator). """ if self._listeners is None: return with suppress(KeyError): del self._listeners[id(listener)] def _notify(self, notification, *args, **kwargs): """ Notify all listeners of some particular state change by calling their ``_update_<notification>`` method with the given ``*args`` and ``**kwargs``. The notification does not by default include the object that actually changed (``self``), but it certainly may if required. """ if self._listeners is None: return method_name = '_update_{0}'.format(notification) for listener in self._listeners.valuerefs(): # Use valuerefs instead of itervaluerefs; see # https://github.com/astropy/astropy/issues/4015 listener = listener() # dereference weakref if listener is None: continue if hasattr(listener, method_name): method = getattr(listener, method_name) if callable(method): method(*args, **kwargs) def __getstate__(self): """ Exclude listeners when saving the listener's state, since they may be ephemeral. 
""" # TODO: This hasn't come up often, but if anyone needs to pickle HDU # objects it will be necessary when HDU objects' states are restored to # re-register themselves as listeners on their new column instances. try: state = super().__getstate__() except AttributeError: # Chances are the super object doesn't have a getstate state = self.__dict__.copy() state['_listeners'] = None return state def first(iterable): """ Returns the first item returned by iterating over an iterable object. Example: >>> a = [1, 2, 3] >>> first(a) 1 """ return next(iter(iterable)) def itersubclasses(cls, _seen=None): """ Generator over all subclasses of a given class, in depth first order. >>> class A: pass >>> class B(A): pass >>> class C(A): pass >>> class D(B,C): pass >>> class E(D): pass >>> >>> for cls in itersubclasses(A): ... print(cls.__name__) B D E C >>> # get ALL classes currently defined >>> [cls.__name__ for cls in itersubclasses(object)] [...'tuple', ...'type', ...] From http://code.activestate.com/recipes/576949/ """ if _seen is None: _seen = set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in sorted(subs, key=operator.attrgetter('__name__')): if sub not in _seen: _seen.add(sub) yield sub for sub in itersubclasses(sub, _seen): yield sub def ignore_sigint(func): """ This decorator registers a custom SIGINT handler to catch and ignore SIGINT until the wrapped function is completed. """ @wraps(func) def wrapped(*args, **kwargs): # Get the name of the current thread and determine if this is a single # threaded application curr_thread = threading.currentThread() single_thread = (threading.activeCount() == 1 and curr_thread.getName() == 'MainThread') class SigintHandler: def __init__(self): self.sigint_received = False def __call__(self, signum, frame): warnings.warn('KeyboardInterrupt ignored until {} is ' 'complete!'.format(func.__name__), AstropyUserWarning) self.sigint_received = True sigint_handler = SigintHandler() # Define new signal interput handler if single_thread: # Install new handler old_handler = signal.signal(signal.SIGINT, sigint_handler) try: func(*args, **kwargs) finally: if single_thread: if old_handler is not None: signal.signal(signal.SIGINT, old_handler) else: signal.signal(signal.SIGINT, signal.SIG_DFL) if sigint_handler.sigint_received: raise KeyboardInterrupt return wrapped def pairwise(iterable): """Return the items of an iterable paired with its next item. Ex: s -> (s0,s1), (s1,s2), (s2,s3), .... """ a, b = itertools.tee(iterable) for _ in b: # Just a little trick to advance b without having to catch # StopIter if b happens to be empty break return zip(a, b) def encode_ascii(s): if isinstance(s, str): return s.encode('ascii') elif (isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.str_)): ns = np.char.encode(s, 'ascii').view(type(s)) if ns.dtype.itemsize != s.dtype.itemsize / 4: ns = ns.astype((np.bytes_, s.dtype.itemsize / 4)) return ns elif (isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.bytes_)): raise TypeError('string operation on non-string array') return s def decode_ascii(s): if isinstance(s, bytes): try: return s.decode('ascii') except UnicodeDecodeError: warnings.warn('non-ASCII characters are present in the FITS ' 'file header and have been replaced by "?" 
' 'characters', AstropyUserWarning) s = s.decode('ascii', errors='replace') return s.replace(u'\ufffd', '?') elif (isinstance(s, np.ndarray) and issubclass(s.dtype.type, np.bytes_)): # np.char.encode/decode annoyingly don't preserve the type of the # array, hence the view() call # It also doesn't necessarily preserve widths of the strings, # hence the astype() if s.size == 0: # Numpy apparently also has a bug that if a string array is # empty calling np.char.decode on it returns an empty float64 # array wth dt = s.dtype.str.replace('S', 'U') ns = np.array([], dtype=dt).view(type(s)) else: ns = np.char.decode(s, 'ascii').view(type(s)) if ns.dtype.itemsize / 4 != s.dtype.itemsize: ns = ns.astype((np.str_, s.dtype.itemsize)) return ns elif (isinstance(s, np.ndarray) and not issubclass(s.dtype.type, np.str_)): # Don't silently pass through on non-string arrays; we don't want # to hide errors where things that are not stringy are attempting # to be decoded raise TypeError('string operation on non-string array') return s def isreadable(f): """ Returns True if the file-like object can be read from. This is a common- sense approximation of io.IOBase.readable. """ if hasattr(f, 'readable'): return f.readable() if hasattr(f, 'closed') and f.closed: # This mimics the behavior of io.IOBase.readable raise ValueError('I/O operation on closed file') if not hasattr(f, 'read'): return False if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'): return False # Not closed, has a 'read()' method, and either has no known mode or a # readable mode--should be good enough to assume 'readable' return True def iswritable(f): """ Returns True if the file-like object can be written to. This is a common- sense approximation of io.IOBase.writable. """ if hasattr(f, 'writable'): return f.writable() if hasattr(f, 'closed') and f.closed: # This mimics the behavior of io.IOBase.writable raise ValueError('I/O operation on closed file') if not hasattr(f, 'write'): return False if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'): return False # Note closed, has a 'write()' method, and either has no known mode or a # mode that supports writing--should be good enough to assume 'writable' return True def isfile(f): """ Returns True if the given object represents an OS-level file (that is, ``isinstance(f, file)``). On Python 3 this also returns True if the given object is higher level wrapper on top of a FileIO object, such as a TextIOWrapper. """ if isinstance(f, io.FileIO): return True elif hasattr(f, 'buffer'): return isfile(f.buffer) elif hasattr(f, 'raw'): return isfile(f.raw) return False def fileobj_open(filename, mode): """ A wrapper around the `open()` builtin. This exists because `open()` returns an `io.BufferedReader` by default. This is bad, because `io.BufferedReader` doesn't support random access, which we need in some cases. We must call open with buffering=0 to get a raw random-access file reader. """ return open(filename, mode, buffering=0) def fileobj_name(f): """ Returns the 'name' of file-like object f, if it has anything that could be called its name. Otherwise f's class or type is returned. If f is a string f itself is returned. 
""" if isinstance(f, str): return f elif isinstance(f, gzip.GzipFile): # The .name attribute on GzipFiles does not always represent the name # of the file being read/written--it can also represent the original # name of the file being compressed # See the documentation at # https://docs.python.org/3/library/gzip.html#gzip.GzipFile # As such, for gzip files only return the name of the underlying # fileobj, if it exists return fileobj_name(f.fileobj) elif hasattr(f, 'name'): return f.name elif hasattr(f, 'filename'): return f.filename elif hasattr(f, '__class__'): return str(f.__class__) else: return str(type(f)) def fileobj_closed(f): """ Returns True if the given file-like object is closed or if f is a string (and assumed to be a pathname). Returns False for all other types of objects, under the assumption that they are file-like objects with no sense of a 'closed' state. """ if isinstance(f, str): return True if hasattr(f, 'closed'): return f.closed elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'): return f.fileobj.closed elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'): return f.fp.closed else: return False def fileobj_mode(f): """ Returns the 'mode' string of a file-like object if such a thing exists. Otherwise returns None. """ # Go from most to least specific--for example gzip objects have a 'mode' # attribute, but it's not analogous to the file.mode attribute # gzip.GzipFile -like if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'): fileobj = f.fileobj # astropy.io.fits._File -like, doesn't need additional checks because it's # already validated elif hasattr(f, 'fileobj_mode'): return f.fileobj_mode # PIL-Image -like investigate the fp (filebuffer) elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'): fileobj = f.fp # FILEIO -like (normal open(...)), keep as is. elif hasattr(f, 'mode'): fileobj = f # Doesn't look like a file-like object, for example strings, urls or paths. else: return None return _fileobj_normalize_mode(fileobj) def _fileobj_normalize_mode(f): """Takes care of some corner cases in Python where the mode string is either oddly formatted or does not truly represent the file mode. """ mode = f.mode # Special case: Gzip modes: if isinstance(f, gzip.GzipFile): # GzipFiles can be either readonly or writeonly if mode == gzip.READ: return 'rb' elif mode == gzip.WRITE: return 'wb' else: return None # This shouldn't happen? # Sometimes Python can produce modes like 'r+b' which will be normalized # here to 'rb+' if '+' in mode: mode = mode.replace('+', '') mode += '+' return mode def fileobj_is_binary(f): """ Returns True if the give file or file-like object has a file open in binary mode. When in doubt, returns True by default. """ # This is kind of a hack for this to work correctly with _File objects, # which, for the time being, are *always* binary if hasattr(f, 'binary'): return f.binary if isinstance(f, io.TextIOBase): return False mode = fileobj_mode(f) if mode: return 'b' in mode else: return True def translate(s, table, deletechars): if deletechars: table = table.copy() for c in deletechars: table[ord(c)] = None return s.translate(table) def fill(text, width, **kwargs): """ Like :func:`textwrap.wrap` but preserves existing paragraphs which :func:`textwrap.wrap` does not otherwise handle well. Also handles section headers. 
""" paragraphs = text.split('\n\n') def maybe_fill(t): if all(len(l) < width for l in t.splitlines()): return t else: return textwrap.fill(t, width, **kwargs) return '\n\n'.join(maybe_fill(p) for p in paragraphs) # On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to # fail when reading over 2Gb of data. If we detect these versions of MacOS X, # we can instead read the data in chunks. To avoid performance penalties at # import time, we defer the setting of this global variable until the first # time it is needed. CHUNKED_FROMFILE = None def _array_from_file(infile, dtype, count): """Create a numpy array from a file or a file-like object.""" if isfile(infile): global CHUNKED_FROMFILE if CHUNKED_FROMFILE is None: if (sys.platform == 'darwin' and LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')): CHUNKED_FROMFILE = True else: CHUNKED_FROMFILE = False if CHUNKED_FROMFILE: chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe if count < chunk_size: return np.fromfile(infile, dtype=dtype, count=count) else: array = np.empty(count, dtype=dtype) for beg in range(0, count, chunk_size): end = min(count, beg + chunk_size) array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg) return array else: return np.fromfile(infile, dtype=dtype, count=count) else: # treat as file-like object with "read" method; this includes gzip file # objects, because numpy.fromfile just reads the compressed bytes from # their underlying file object, instead of the decompressed bytes read_size = np.dtype(dtype).itemsize * count s = infile.read(read_size) array = np.frombuffer(s, dtype=dtype, count=count) # copy is needed because np.frombuffer returns a read-only view of the # underlying buffer array = array.copy() return array _OSX_WRITE_LIMIT = (2 ** 32) - 1 _WIN_WRITE_LIMIT = (2 ** 31) - 1 def _array_to_file(arr, outfile): """ Write a numpy array to a file or a file-like object. Parameters ---------- arr : `~numpy.ndarray` The Numpy array to write. outfile : file-like A file-like object such as a Python file object, an `io.BytesIO`, or anything else with a ``write`` method. The file object must support the buffer interface in its ``write``. If writing directly to an on-disk file this delegates directly to `ndarray.tofile`. Otherwise a slower Python implementation is used. """ if isfile(outfile): write = lambda a, f: a.tofile(f) else: write = _array_to_file_like # Implements a workaround for a bug deep in OSX's stdlib file writing # functions; on 64-bit OSX it is not possible to correctly write a number # of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192-- # whatever the default blocksize for the filesystem is). # This issue should have a workaround in Numpy too, but hasn't been # implemented there yet: https://github.com/astropy/astropy/issues/839 # # Apparently Windows has its own fwrite bug: # https://github.com/numpy/numpy/issues/2256 if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and arr.nbytes % 4096 == 0): # chunksize is a count of elements in the array, not bytes chunksize = _OSX_WRITE_LIMIT // arr.itemsize elif sys.platform.startswith('win'): chunksize = _WIN_WRITE_LIMIT // arr.itemsize else: # Just pass the whole array to the write routine return write(arr, outfile) # Write one chunk at a time for systems whose fwrite chokes on large # writes. 
idx = 0 arr = arr.view(np.ndarray).flatten() while idx < arr.nbytes: write(arr[idx:idx + chunksize], outfile) idx += chunksize def _array_to_file_like(arr, fileobj): """ Write a `~numpy.ndarray` to a file-like object (which is not supported by `numpy.ndarray.tofile`). """ # If the array is empty, we can simply take a shortcut and return since # there is nothing to write. if len(arr) == 0: return if arr.flags.contiguous: # It suffices to just pass the underlying buffer directly to the # fileobj's write (assuming it supports the buffer interface). If # it does not have the buffer interface, a TypeError should be returned # in which case we can fall back to the other methods. try: fileobj.write(arr.data) except TypeError: pass else: return if hasattr(np, 'nditer'): # nditer version for non-contiguous arrays for item in np.nditer(arr): fileobj.write(item.tostring()) else: # Slower version for Numpy versions without nditer; # The problem with flatiter is it doesn't preserve the original # byteorder byteorder = arr.dtype.byteorder if ((sys.byteorder == 'little' and byteorder == '>') or (sys.byteorder == 'big' and byteorder == '<')): for item in arr.flat: fileobj.write(item.byteswap().tostring()) else: for item in arr.flat: fileobj.write(item.tostring()) def _write_string(f, s): """ Write a string to a file, encoding to ASCII if the file is open in binary mode, or decoding if the file is open in text mode. """ # Assume if the file object doesn't have a specific mode, that the mode is # binary binmode = fileobj_is_binary(f) if binmode and isinstance(s, str): s = encode_ascii(s) elif not binmode and not isinstance(f, str): s = decode_ascii(s) f.write(s) def _convert_array(array, dtype): """ Converts an array to a new dtype--if the itemsize of the new dtype is the same as the old dtype and both types are not numeric, a view is returned. Otherwise a new array must be created. """ if array.dtype == dtype: return array elif (array.dtype.itemsize == dtype.itemsize and not (np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number))): # Includes a special case when both dtypes are at least numeric to # account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218 return array.view(dtype) else: return array.astype(dtype) def _unsigned_zero(dtype): """ Given a numpy dtype, finds its "zero" point, which is exactly in the middle of its range. """ assert dtype.kind == 'u' return 1 << (dtype.itemsize * 8 - 1) def _is_pseudo_unsigned(dtype): return dtype.kind == 'u' and dtype.itemsize >= 2 def _is_int(val): return isinstance(val, all_integer_types) def _str_to_num(val): """Converts a given string to either an int or a float if necessary.""" try: num = int(val) except ValueError: # If this fails then an exception should be raised anyways num = float(val) return num def _words_group(input, strlen): """ Split a long string into parts where each part is no longer than ``strlen`` and no word is cut into two pieces. But if there is one single word which is longer than ``strlen``, then it will be split in the middle of the word. 
""" words = [] nblanks = input.count(' ') nmax = max(nblanks, len(input) // strlen + 1) arr = np.frombuffer((input + ' ').encode('utf8'), dtype=(bytes, 1)) # locations of the blanks blank_loc = np.nonzero(arr == b' ')[0] offset = 0 xoffset = 0 for idx in range(nmax): try: loc = np.nonzero(blank_loc >= strlen + offset)[0][0] offset = blank_loc[loc - 1] + 1 if loc == 0: offset = -1 except Exception: offset = len(input) # check for one word longer than strlen, break in the middle if offset <= xoffset: offset = xoffset + strlen # collect the pieces in a list words.append(input[xoffset:offset]) if len(input) == offset: break xoffset = offset return words def _tmp_name(input): """ Create a temporary file name which should not already exist. Use the directory of the input file as the base name of the mkstemp() output. """ if input is not None: input = os.path.dirname(input) f, fn = tempfile.mkstemp(dir=input) os.close(f) return fn def _get_array_mmap(array): """ If the array has an mmap.mmap at base of its base chain, return the mmap object; otherwise return None. """ if isinstance(array, mmap.mmap): return array base = array while hasattr(base, 'base') and base.base is not None: if isinstance(base.base, mmap.mmap): return base.base base = base.base @contextmanager def _free_space_check(hdulist, dirname=None): try: yield except OSError as exc: error_message = '' if not isinstance(hdulist, list): hdulist = [hdulist, ] if dirname is None: dirname = os.path.dirname(hdulist._file.name) if os.path.isdir(dirname): free_space = data.get_free_space_in_dir(dirname) hdulist_size = np.sum(hdu.size for hdu in hdulist) if free_space < hdulist_size: error_message = ("Not enough space on disk: requested {}, " "available {}. ".format(hdulist_size, free_space)) for hdu in hdulist: hdu._close() raise OSError(error_message + str(exc)) def _extract_number(value, default): """ Attempts to extract an integer number from the given value. If the extraction fails, the value of the 'default' argument is returned. """ try: # The _str_to_num method converts the value to string/float # so we need to perform one additional conversion to int on top return int(_str_to_num(value)) except (TypeError, ValueError): return default def get_testdata_filepath(filename): """ Return a string representing the path to the file requested from the io.fits test data set. .. versionadded:: 2.0.3 Parameters ---------- filename : str The filename of the test data file. Returns ------- filepath : str The path to the requested file. """ return data.get_pkg_data_filename( 'io/fits/tests/data/{}'.format(filename), 'astropy') def _rstrip_inplace(array): """ Performs an in-place rstrip operation on string arrays. This is necessary since the built-in `np.char.rstrip` in Numpy does not perform an in-place calculation. """ # The following implementation convert the string to unsigned integers of # the right length. Trailing spaces (which are represented as 32) are then # converted to null characters (represented as zeros). To avoid creating # large temporary mask arrays, we loop over chunks (attempting to do that # on a 1-D version of the array; large memory may still be needed in the # unlikely case that a string array has small first dimension and cannot # be represented as a contiguous 1-D array in memory). dt = array.dtype if dt.kind not in 'SU': raise TypeError("This function can only be used on string arrays") # View the array as appropriate integers. The last dimension will # equal the number of characters in each string. 
bpc = 1 if dt.kind == 'S' else 4 dt_int = "{0}{1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc) b = array.view(dt_int, np.ndarray) # For optimal speed, work in chunks of the internal ufunc buffer size. bufsize = np.getbufsize() # Attempt to have the strings as a 1-D array to give the chunk known size. # Note: the code will work if this fails; the chunks will just be larger. if b.ndim > 2: try: b.shape = -1, b.shape[-1] except AttributeError: # can occur for non-contiguous arrays pass for j in range(0, b.shape[0], bufsize): c = b[j:j + bufsize] # Mask which will tell whether we're in a sequence of trailing spaces. mask = np.ones(c.shape[:-1], dtype=bool) # Loop over the characters in the strings, in reverse order. We process # the i-th character of all strings in the chunk at the same time. If # the character is 32, this corresponds to a space, and we then change # this to 0. We then construct a new mask to find rows where the # i-th character is 0 (null) and the i-1-th is 32 (space) and repeat. for i in range(-1, -c.shape[-1], -1): mask &= c[..., i] == 32 c[..., i][mask] = 0 mask = c[..., i] == 0 return array
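# Illustrative behavior of a few of the helpers above (a minimal sketch; the
# inputs are arbitrary examples chosen only to show the return values):
#
#     first([3, 4, 5])                    # -> 3
#     list(pairwise('abcd'))              # -> [('a', 'b'), ('b', 'c'), ('c', 'd')]
#     encode_ascii('SIMPLE')              # -> b'SIMPLE'
#     decode_ascii(b'SIMPLE')             # -> 'SIMPLE'
#     _words_group('lorem ipsum dolor sit amet', 12)
#                                         # -> ['lorem ipsum ', 'dolor sit ', 'amet']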
b45b4d3ed9663a31316d7c7dd252e7af09849581aabb04f14dfc81de1a27c3db
# Licensed under a 3-clause BSD style license - see PYFITS.rst import re import warnings import numpy as np from .util import _str_to_num, _is_int, translate, _words_group from .verify import _Verify, _ErrList, VerifyError, VerifyWarning from . import conf from ...utils.exceptions import AstropyUserWarning __all__ = ['Card', 'Undefined'] FIX_FP_TABLE = str.maketrans('de', 'DE') FIX_FP_TABLE2 = str.maketrans('dD', 'eE') CARD_LENGTH = 80 BLANK_CARD = ' ' * CARD_LENGTH KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords VALUE_INDICATOR = '= ' # The standard FITS value indicator HIERARCH_VALUE_INDICATOR = '=' # HIERARCH cards may use a shortened indicator class Undefined: """Undefined value.""" def __init__(self): # This __init__ is required to be here for Sphinx documentation pass UNDEFINED = Undefined() class Card(_Verify): length = CARD_LENGTH """The length of a Card image; should always be 80 for valid FITS files.""" # String for a FITS standard compliant (FSC) keyword. _keywd_FSC_RE = re.compile(r'^[A-Z0-9_-]{0,%d}$' % KEYWORD_LENGTH) # This will match any printable ASCII character excluding '=' _keywd_hierarch_RE = re.compile(r'^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$', re.I) # A number sub-string, either an integer or a float in fixed or # scientific notation. One for FSC and one for non-FSC (NFSC) format: # NFSC allows lower case of DE for exponent, allows space between sign, # digits, exponent sign, and exponents _digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?' _digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? *' + _digits_NFSC # This regex helps delete leading zeros from numbers, otherwise # Python might evaluate them as octal values (this is not-greedy, however, # so it may not strip leading zeros from a float, which is fine) _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*?(?P<digt>{})'.format( _digits_FSC)) _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*?(?P<digt>{})'.format( _digits_NFSC)) # FSC commentary card string which must contain printable ASCII characters. # Note: \Z matches the end of the string without allowing newlines _ascii_text_re = re.compile(r'[ -~]*\Z') # Checks for a valid value/comment string. It returns a match object # for a valid value/comment string. # The valu group will return a match if a FITS string, boolean, # number, or complex value is found, otherwise it will return # None, meaning the keyword is undefined. The comment field will # return a match if the comment separator is found, though the # comment maybe an empty string. _value_FSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' # The <strg> regex is not correct for all cases, but # it comes pretty darn close. It appears to find the # end of a string rather well, but will accept # strings with an odd number of single quotes, # instead of issuing an error. The FITS standard # appears vague on this issue and only states that a # string should not end with two single quotes, # whereas it should not end with an even number of # quotes to be precise. # # Note that a non-greedy match is done for a string, # since a greedy match will find a single-quote after # the comment separator resulting in an incorrect # match. r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC + r')|' r'(?P<cplx>\( *' r'(?P<real>' + _numr_FSC + r') *, *' r'(?P<imag>' + _numr_FSC + r') *\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' 
r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_NFSC + r')|' r'(?P<cplx>\( *' r'(?P<real>' + _numr_NFSC + r') *, *' r'(?P<imag>' + _numr_NFSC + r') *\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>(.|\n)*)' r')?$') _rvkc_identifier = r'[a-zA-Z_]\w*' _rvkc_field = _rvkc_identifier + r'(\.\d+)?' _rvkc_field_specifier_s = r'{}(\.{})*'.format(_rvkc_field, _rvkc_field) _rvkc_field_specifier_val = (r'(?P<keyword>{}): (?P<val>{})'.format( _rvkc_field_specifier_s, _numr_FSC)) _rvkc_keyword_val = r'\'(?P<rawval>{})\''.format(_rvkc_field_specifier_val) _rvkc_keyword_val_comm = (r' *{} *(/ *(?P<comm>[ -~]*))?$'.format( _rvkc_keyword_val)) _rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + '$') # regular expression to extract the key and the field specifier from a # string that is being used to index into a card list that contains # record value keyword cards (ex. 'DP1.AXIS.1') _rvkc_keyword_name_RE = ( re.compile(r'(?P<keyword>{})\.(?P<field_specifier>{})$'.format( _rvkc_identifier, _rvkc_field_specifier_s))) # regular expression to extract the field specifier and value and comment # from the string value of a record value keyword card # (ex "'AXIS.1: 1' / a comment") _rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm) _commentary_keywords = {'', 'COMMENT', 'HISTORY', 'END'} # The default value indicator; may be changed if required by a convention # (namely HIERARCH cards) _value_indicator = VALUE_INDICATOR def __init__(self, keyword=None, value=None, comment=None, **kwargs): # For backwards compatibility, support the 'key' keyword argument: if keyword is None and 'key' in kwargs: keyword = kwargs['key'] self._keyword = None self._value = None self._comment = None self._image = None # This attribute is set to False when creating the card from a card # image to ensure that the contents of the image get verified at some # point self._verified = True # A flag to conveniently mark whether or not this was a valid HIERARCH # card self._hierarch = False # If the card could not be parsed according the the FITS standard or # any recognized non-standard conventions, this will be True self._invalid = False self._field_specifier = None # These are used primarily only by RVKCs self._rawkeyword = None self._rawvalue = None if not (keyword is not None and value is not None and self._check_if_rvkc(keyword, value)): # If _check_if_rvkc passes, it will handle setting the keyword and # value if keyword is not None: self.keyword = keyword if value is not None: self.value = value if comment is not None: self.comment = comment self._modified = False self._valuestring = None self._valuemodified = False def __repr__(self): return repr((self.keyword, self.value, self.comment)) def __str__(self): return self.image def __len__(self): return 3 def __getitem__(self, index): return (self.keyword, self.value, self.comment)[index] @property def keyword(self): """Returns the keyword name parsed from the card image.""" if self._keyword is not None: return self._keyword elif self._image: self._keyword = self._parse_keyword() return self._keyword else: self.keyword = '' return '' @keyword.setter def keyword(self, keyword): """Set the key attribute; once set it cannot be modified.""" if self._keyword is not None: raise AttributeError( 'Once set, the Card keyword may not be modified') elif isinstance(keyword, str): # Be nice and remove trailing whitespace--some FITS code always # pads 
keywords out with spaces; leading whitespace, however, # should be strictly disallowed. keyword = keyword.rstrip() keyword_upper = keyword.upper() if (len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(keyword_upper)): # For keywords with length > 8 they will be HIERARCH cards, # and can have arbitrary case keywords if keyword_upper == 'END': raise ValueError("Keyword 'END' not allowed.") keyword = keyword_upper elif self._keywd_hierarch_RE.match(keyword): # In prior versions of PyFITS (*) HIERARCH cards would only be # created if the user-supplied keyword explicitly started with # 'HIERARCH '. Now we will create them automatically for long # keywords, but we still want to support the old behavior too; # the old behavior makes it possible to create HEIRARCH cards # that would otherwise be recognized as RVKCs # (*) This has never affected Astropy, because it was changed # before PyFITS was merged into Astropy! self._hierarch = True self._value_indicator = HIERARCH_VALUE_INDICATOR if keyword_upper[:9] == 'HIERARCH ': # The user explicitly asked for a HIERARCH card, so don't # bug them about it... keyword = keyword[9:].strip() else: # We'll gladly create a HIERARCH card, but a warning is # also displayed warnings.warn( 'Keyword name {!r} is greater than 8 characters or ' 'contains characters not allowed by the FITS ' 'standard; a HIERARCH card will be created.'.format( keyword), VerifyWarning) else: raise ValueError('Illegal keyword name: {!r}.'.format(keyword)) self._keyword = keyword self._modified = True else: raise ValueError('Keyword name {!r} is not a string.'.format(keyword)) @property def value(self): """The value associated with the keyword stored in this card.""" if self.field_specifier: return float(self._value) if self._value is not None: value = self._value elif self._valuestring is not None or self._image: self._value = self._parse_value() value = self._value else: self._value = value = '' if conf.strip_header_whitespace and isinstance(value, str): value = value.rstrip() return value @value.setter def value(self, value): if self._invalid: raise ValueError( 'The value of invalid/unparseable cards cannot set. Either ' 'delete this card from the header or replace it.') if value is None: value = '' oldvalue = self._value if oldvalue is None: oldvalue = '' if not isinstance(value, (str, int, float, complex, bool, Undefined, np.floating, np.integer, np.complexfloating, np.bool_)): raise ValueError('Illegal value: {!r}.'.format(value)) if isinstance(value, float) and (np.isnan(value) or np.isinf(value)): raise ValueError("Floating point {!r} values are not allowed " "in FITS headers.".format(value)) elif isinstance(value, str): m = self._ascii_text_re.match(value) if not m: raise ValueError( 'FITS header values must contain standard printable ASCII ' 'characters; {!r} contains characters not representable in ' 'ASCII or non-printable characters.'.format(value)) elif isinstance(value, bytes): # Allow str, but only if they can be decoded to ASCII text; note # this is not even allowed on Python 3 since the `bytes` type is # not included in `str`. 
Presently we simply don't # allow bytes to be assigned to headers, as doing so would too # easily mask potential user error valid = True try: text_value = value.decode('ascii') except UnicodeDecodeError: valid = False else: # Check against the printable characters regexp as well m = self._ascii_text_re.match(text_value) valid = m is not None if not valid: raise ValueError( 'FITS header values must contain standard printable ASCII ' 'characters; {!r} contains characters/bytes that do not ' 'represent printable characters in ASCII.'.format(value)) elif isinstance(value, np.bool_): value = bool(value) if (conf.strip_header_whitespace and (isinstance(oldvalue, str) and isinstance(value, str))): # Ignore extra whitespace when comparing the new value to the old different = oldvalue.rstrip() != value.rstrip() elif isinstance(oldvalue, bool) or isinstance(value, bool): different = oldvalue is not value else: different = (oldvalue != value or not isinstance(value, type(oldvalue))) if different: self._value = value self._rawvalue = None self._modified = True self._valuestring = None self._valuemodified = True if self.field_specifier: try: self._value = _int_or_float(self._value) except ValueError: raise ValueError('value {} is not a float'.format( self._value)) @value.deleter def value(self): if self._invalid: raise ValueError( 'The value of invalid/unparseable cards cannot deleted. ' 'Either delete this card from the header or replace it.') if not self.field_specifier: self.value = '' else: raise AttributeError('Values cannot be deleted from record-valued ' 'keyword cards') @property def rawkeyword(self): """On record-valued keyword cards this is the name of the standard <= 8 character FITS keyword that this RVKC is stored in. Otherwise it is the card's normal keyword. """ if self._rawkeyword is not None: return self._rawkeyword elif self.field_specifier is not None: self._rawkeyword = self.keyword.split('.', 1)[0] return self._rawkeyword else: return self.keyword @property def rawvalue(self): """On record-valued keyword cards this is the raw string value in the ``<field-specifier>: <value>`` format stored in the card in order to represent a RVKC. Otherwise it is the card's normal value. """ if self._rawvalue is not None: return self._rawvalue elif self.field_specifier is not None: self._rawvalue = '{}: {}'.format(self.field_specifier, self.value) return self._rawvalue else: return self.value @property def comment(self): """Get the comment attribute from the card image if not already set.""" if self._comment is not None: return self._comment elif self._image: self._comment = self._parse_comment() return self._comment else: self.comment = '' return '' @comment.setter def comment(self, comment): if self._invalid: raise ValueError( 'The comment of invalid/unparseable cards cannot set. Either ' 'delete this card from the header or replace it.') if comment is None: comment = '' if isinstance(comment, str): m = self._ascii_text_re.match(comment) if not m: raise ValueError( 'FITS header comments must contain standard printable ' 'ASCII characters; {!r} contains characters not ' 'representable in ASCII or non-printable characters.'.format( comment)) oldcomment = self._comment if oldcomment is None: oldcomment = '' if comment != oldcomment: self._comment = comment self._modified = True @comment.deleter def comment(self): if self._invalid: raise ValueError( 'The comment of invalid/unparseable cards cannot deleted. 
' 'Either delete this card from the header or replace it.') self.comment = '' @property def field_specifier(self): """ The field-specifier of record-valued keyword cards; always `None` on normal cards. """ # Ensure that the keyword exists and has been parsed--the will set the # internal _field_specifier attribute if this is a RVKC. if self.keyword: return self._field_specifier else: return None @field_specifier.setter def field_specifier(self, field_specifier): if not field_specifier: raise ValueError('The field-specifier may not be blank in ' 'record-valued keyword cards.') elif not self.field_specifier: raise AttributeError('Cannot coerce cards to be record-valued ' 'keyword cards by setting the ' 'field_specifier attribute') elif field_specifier != self.field_specifier: self._field_specifier = field_specifier # The keyword need also be updated keyword = self._keyword.split('.', 1)[0] self._keyword = '.'.join([keyword, field_specifier]) self._modified = True @field_specifier.deleter def field_specifier(self): raise AttributeError('The field_specifier attribute may not be ' 'deleted from record-valued keyword cards.') @property def image(self): """ The card "image", that is, the 80 byte character string that represents this card in an actual FITS header. """ if self._image and not self._verified: self.verify('fix+warn') if self._image is None or self._modified: self._image = self._format_image() return self._image @property def is_blank(self): """ `True` if the card is completely blank--that is, it has no keyword, value, or comment. It appears in the header as 80 spaces. Returns `False` otherwise. """ if not self._verified: # The card image has not been parsed yet; compare directly with the # string representation of a blank card return self._image == BLANK_CARD # If the keyword, value, and comment are all empty (for self.value # explicitly check that it is a string value, since a blank value is # returned as '') return (not self.keyword and (isinstance(self.value, str) and not self.value) and not self.comment) @classmethod def fromstring(cls, image): """ Construct a `Card` object from a (raw) string. It will pad the string if it is not the length of a card image (80 columns). If the card image is longer than 80 columns, assume it contains ``CONTINUE`` card(s). """ card = cls() card._image = _pad(image) card._verified = False return card @classmethod def normalize_keyword(cls, keyword): """ `classmethod` to convert a keyword value that may contain a field-specifier to uppercase. The effect is to raise the key to uppercase and leave the field specifier in its original case. Parameters ---------- keyword : or str A keyword value or a ``keyword.field-specifier`` value """ # Test first for the most common case: a standard FITS keyword provided # in standard all-caps if (len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword)): return keyword # Test if this is a record-valued keyword match = cls._rvkc_keyword_name_RE.match(keyword) if match: return '.'.join((match.group('keyword').strip().upper(), match.group('field_specifier'))) elif len(keyword) > 9 and keyword[:9].upper() == 'HIERARCH ': # Remove 'HIERARCH' from HIERARCH keywords; this could lead to # ambiguity if there is actually a keyword card containing # "HIERARCH HIERARCH", but shame on you if you do that. 
return keyword[9:].strip().upper() else: # A normal FITS keyword, but provided in non-standard case return keyword.strip().upper() def _check_if_rvkc(self, *args): """ Determine whether or not the card is a record-valued keyword card. If one argument is given, that argument is treated as a full card image and parsed as such. If two arguments are given, the first is treated as the card keyword (including the field-specifier if the card is intended as a RVKC), and the second as the card value OR the first value can be the base keyword, and the second value the 'field-specifier: value' string. If the check passes the ._keyword, ._value, and .field_specifier keywords are set. Examples -------- :: self._check_if_rvkc('DP1', 'AXIS.1: 2') self._check_if_rvkc('DP1.AXIS.1', 2) self._check_if_rvkc('DP1 = AXIS.1: 2') """ if not conf.enable_record_valued_keyword_cards: return False if len(args) == 1: self._check_if_rvkc_image(*args) elif len(args) == 2: keyword, value = args if not isinstance(keyword, str): return False if keyword in self._commentary_keywords: return False match = self._rvkc_keyword_name_RE.match(keyword) if match and isinstance(value, (int, float)): self._init_rvkc(match.group('keyword'), match.group('field_specifier'), None, value) return True # Testing for ': ' is a quick way to avoid running the full regular # expression, speeding this up for the majority of cases if isinstance(value, str) and value.find(': ') > 0: match = self._rvkc_field_specifier_val_RE.match(value) if match and self._keywd_FSC_RE.match(keyword): self._init_rvkc(keyword, match.group('keyword'), value, match.group('val')) return True def _check_if_rvkc_image(self, *args): """ Implements `Card._check_if_rvkc` for the case of an unparsed card image. If given one argument this is the full intact image. If given two arguments the card has already been split between keyword and value+comment at the standard value indicator '= '. """ if len(args) == 1: image = args[0] eq_idx = image.find(VALUE_INDICATOR) if eq_idx < 0 or eq_idx > 9: return False keyword = image[:eq_idx] rest = image[eq_idx + len(VALUE_INDICATOR):] else: keyword, rest = args rest = rest.lstrip() # This test allows us to skip running the full regular expression for # the majority of cards that do not contain strings or that definitely # do not contain RVKC field-specifiers; it's very much a # micro-optimization but it does make a measurable difference if not rest or rest[0] != "'" or rest.find(': ') < 2: return False match = self._rvkc_keyword_val_comm_RE.match(rest) if match: self._init_rvkc(keyword, match.group('keyword'), match.group('rawval'), match.group('val')) return True def _init_rvkc(self, keyword, field_specifier, field, value): """ Sort of addendum to Card.__init__ to set the appropriate internal attributes if the card was determined to be a RVKC. 
""" keyword_upper = keyword.upper() self._keyword = '.'.join((keyword_upper, field_specifier)) self._rawkeyword = keyword_upper self._field_specifier = field_specifier self._value = _int_or_float(value) self._rawvalue = field def _parse_keyword(self): keyword = self._image[:KEYWORD_LENGTH].strip() keyword_upper = keyword.upper() val_ind_idx = self._image.find(VALUE_INDICATOR) special = self._commentary_keywords if (0 <= val_ind_idx <= KEYWORD_LENGTH or keyword_upper in special or keyword_upper == 'CONTINUE'): # The value indicator should appear in byte 8, but we are flexible # and allow this to be fixed if val_ind_idx >= 0: keyword = keyword[:val_ind_idx] rest = self._image[val_ind_idx + len(VALUE_INDICATOR):] # So far this looks like a standard FITS keyword; check whether # the value represents a RVKC; if so then we pass things off to # the RVKC parser if self._check_if_rvkc_image(keyword, rest): return self._keyword keyword_upper = keyword_upper[:val_ind_idx] return keyword_upper elif (keyword_upper == 'HIERARCH' and self._image[8] == ' ' and HIERARCH_VALUE_INDICATOR in self._image): # This is valid HIERARCH card as described by the HIERARCH keyword # convention: # http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html self._hierarch = True self._value_indicator = HIERARCH_VALUE_INDICATOR keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:] return keyword.strip() else: warnings.warn('The following header keyword is invalid or follows ' 'an unrecognized non-standard convention:\n{}'.format( self._image), AstropyUserWarning) self._invalid = True return keyword def _parse_value(self): """Extract the keyword value from the card image.""" # for commentary cards, no need to parse further # Likewise for invalid cards if self.keyword.upper() in self._commentary_keywords or self._invalid: return self._image[KEYWORD_LENGTH:].rstrip() if self._check_if_rvkc(self._image): return self._value if len(self._image) > self.length: values = [] for card in self._itersubcards(): value = card.value.rstrip().replace("''", "'") if value and value[-1] == '&': value = value[:-1] values.append(value) value = ''.join(values) self._valuestring = value return value m = self._value_NFSC_RE.match(self._split()[1]) if m is None: raise VerifyError("Unparsable card ({}), fix it first with " ".verify('fix').".format(self.keyword)) if m.group('bool') is not None: value = m.group('bool') == 'T' elif m.group('strg') is not None: value = re.sub("''", "'", m.group('strg')) elif m.group('numr') is not None: # Check for numbers with leading 0s. numr = self._number_NFSC_RE.match(m.group('numr')) digt = translate(numr.group('digt'), FIX_FP_TABLE2, ' ') if numr.group('sign') is None: sign = '' else: sign = numr.group('sign') value = _str_to_num(sign + digt) elif m.group('cplx') is not None: # Check for numbers with leading 0s. 
real = self._number_NFSC_RE.match(m.group('real')) rdigt = translate(real.group('digt'), FIX_FP_TABLE2, ' ') if real.group('sign') is None: rsign = '' else: rsign = real.group('sign') value = _str_to_num(rsign + rdigt) imag = self._number_NFSC_RE.match(m.group('imag')) idigt = translate(imag.group('digt'), FIX_FP_TABLE2, ' ') if imag.group('sign') is None: isign = '' else: isign = imag.group('sign') value += _str_to_num(isign + idigt) * 1j else: value = UNDEFINED if not self._valuestring: self._valuestring = m.group('valu') return value def _parse_comment(self): """Extract the keyword value from the card image.""" # for commentary cards, no need to parse further # likewise for invalid/unparseable cards if self.keyword in Card._commentary_keywords or self._invalid: return '' if len(self._image) > self.length: comments = [] for card in self._itersubcards(): if card.comment: comments.append(card.comment) comment = '/ ' + ' '.join(comments).rstrip() m = self._value_NFSC_RE.match(comment) else: m = self._value_NFSC_RE.match(self._split()[1]) if m is not None: comment = m.group('comm') if comment: return comment.rstrip() return '' def _split(self): """ Split the card image between the keyword and the rest of the card. """ if self._image is not None: # If we already have a card image, don't try to rebuild a new card # image, which self.image would do image = self._image else: image = self.image if self.keyword in self._commentary_keywords.union(['CONTINUE']): keyword, valuecomment = image.split(' ', 1) else: try: delim_index = image.index(self._value_indicator) except ValueError: delim_index = None # The equal sign may not be any higher than column 10; anything # past that must be considered part of the card value if delim_index is None: keyword = image[:KEYWORD_LENGTH] valuecomment = image[KEYWORD_LENGTH:] elif delim_index > 10 and image[:9] != 'HIERARCH ': keyword = image[:8] valuecomment = image[8:] else: keyword, valuecomment = image.split(self._value_indicator, 1) return keyword.strip(), valuecomment.strip() def _fix_keyword(self): if self.field_specifier: keyword, field_specifier = self._keyword.split('.', 1) self._keyword = '.'.join([keyword.upper(), field_specifier]) else: self._keyword = self._keyword.upper() self._modified = True def _fix_value(self): """Fix the card image for fixable non-standard compliance.""" value = None keyword, valuecomment = self._split() m = self._value_NFSC_RE.match(valuecomment) # for the unparsable case if m is None: try: value, comment = valuecomment.split('/', 1) self.value = value.strip() self.comment = comment.strip() except (ValueError, IndexError): self.value = valuecomment self._valuestring = self._value return elif m.group('numr') is not None: numr = self._number_NFSC_RE.match(m.group('numr')) value = translate(numr.group('digt'), FIX_FP_TABLE, ' ') if numr.group('sign') is not None: value = numr.group('sign') + value elif m.group('cplx') is not None: real = self._number_NFSC_RE.match(m.group('real')) rdigt = translate(real.group('digt'), FIX_FP_TABLE, ' ') if real.group('sign') is not None: rdigt = real.group('sign') + rdigt imag = self._number_NFSC_RE.match(m.group('imag')) idigt = translate(imag.group('digt'), FIX_FP_TABLE, ' ') if imag.group('sign') is not None: idigt = imag.group('sign') + idigt value = '({}, {})'.format(rdigt, idigt) self._valuestring = value # The value itself has not been modified, but its serialized # representation (as stored in self._valuestring) has been changed, so # still set this card as having been modified (see ticket 
#137) self._modified = True def _format_keyword(self): if self.keyword: if self.field_specifier: return '{:{len}}'.format(self.keyword.split('.', 1)[0], len=KEYWORD_LENGTH) elif self._hierarch: return 'HIERARCH {} '.format(self.keyword) else: return '{:{len}}'.format(self.keyword, len=KEYWORD_LENGTH) else: return ' ' * KEYWORD_LENGTH def _format_value(self): # value string float_types = (float, np.floating, complex, np.complexfloating) # Force the value to be parsed out first value = self.value # But work with the underlying raw value instead (to preserve # whitespace, for now...) value = self._value if self.keyword in self._commentary_keywords: # The value of a commentary card must be just a raw unprocessed # string value = str(value) elif (self._valuestring and not self._valuemodified and isinstance(self.value, float_types)): # Keep the existing formatting for float/complex numbers value = '{:>20}'.format(self._valuestring) elif self.field_specifier: value = _format_value(self._value).strip() value = "'{}: {}'".format(self.field_specifier, value) else: value = _format_value(value) # For HIERARCH cards the value should be shortened to conserve space if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH: value = value.strip() return value def _format_comment(self): if not self.comment: return '' else: return ' / {}'.format(self._comment) def _format_image(self): keyword = self._format_keyword() value = self._format_value() is_commentary = keyword.strip() in self._commentary_keywords if is_commentary: comment = '' else: comment = self._format_comment() # equal sign string # by default use the standard value indicator even for HIERARCH cards; # later we may abbreviate it if necessary delimiter = VALUE_INDICATOR if is_commentary: delimiter = '' # put all parts together output = ''.join([keyword, delimiter, value, comment]) # For HIERARCH cards we can save a bit of space if necessary by # removing the space between the keyword and the equals sign; I'm # guessing this is part of the HIEARCH card specification keywordvalue_length = len(keyword) + len(delimiter) + len(value) if (keywordvalue_length > self.length and keyword.startswith('HIERARCH')): if (keywordvalue_length == self.length + 1 and keyword[-1] == ' '): output = ''.join([keyword[:-1], delimiter, value, comment]) else: # I guess the HIERARCH card spec is incompatible with CONTINUE # cards raise ValueError('The header keyword {!r} with its value is ' 'too long'.format(self.keyword)) if len(output) <= self.length: output = '{:80}'.format(output) else: # longstring case (CONTINUE card) # try not to use CONTINUE if the string value can fit in one line. # Instead, just truncate the comment if (isinstance(self.value, str) and len(value) > (self.length - 10)): output = self._format_long_image() else: warnings.warn('Card is too long, comment will be truncated.', VerifyWarning) output = output[:Card.length] return output def _format_long_image(self): """ Break up long string value/comment into ``CONTINUE`` cards. This is a primitive implementation: it will put the value string in one block and the comment string in another. Also, it does not break at the blank space between words. So it may not look pretty. 
""" if self.keyword in Card._commentary_keywords: return self._format_long_commentary_image() value_length = 67 comment_length = 64 output = [] # do the value string value = self._value.replace("'", "''") words = _words_group(value, value_length) for idx, word in enumerate(words): if idx == 0: headstr = '{:{len}}= '.format(self.keyword, len=KEYWORD_LENGTH) else: headstr = 'CONTINUE ' # If this is the final CONTINUE remove the '&' if not self.comment and idx == len(words) - 1: value_format = "'{}'" else: value_format = "'{}&'" value = value_format.format(word) output.append('{:80}'.format(headstr + value)) # do the comment string comment_format = "{}" if self.comment: words = _words_group(self.comment, comment_length) for idx, word in enumerate(words): # If this is the final CONTINUE remove the '&' if idx == len(words) - 1: headstr = "CONTINUE '' / " else: headstr = "CONTINUE '&' / " comment = headstr + comment_format.format(word) output.append('{:80}'.format(comment)) return ''.join(output) def _format_long_commentary_image(self): """ If a commentary card's value is too long to fit on a single card, this will render the card as multiple consecutive commentary card of the same type. """ maxlen = Card.length - KEYWORD_LENGTH value = self._format_value() output = [] idx = 0 while idx < len(value): output.append(str(Card(self.keyword, value[idx:idx + maxlen]))) idx += maxlen return ''.join(output) def _verify(self, option='warn'): self._verified = True errs = _ErrList([]) fix_text = ('Fixed {!r} card to meet the FITS ' 'standard.'.format(self.keyword)) # Don't try to verify cards that already don't meet any recognizable # standard if self._invalid: return errs # verify the equal sign position if (self.keyword not in self._commentary_keywords and (self._image and self._image[:9].upper() != 'HIERARCH ' and self._image.find('=') != 8)): errs.append(self.run_option( option, err_text='Card {!r} is not FITS standard (equal sign not ' 'at column 8).'.format(self.keyword), fix_text=fix_text, fix=self._fix_value)) # verify the key, it is never fixable # always fix silently the case where "=" is before column 9, # since there is no way to communicate back to the _keys. 
if ((self._image and self._image[:8].upper() == 'HIERARCH') or self._hierarch): pass else: if self._image: # PyFITS will auto-uppercase any standard keyword, so lowercase # keywords can only occur if they came from the wild keyword = self._split()[0] if keyword != keyword.upper(): # Keyword should be uppercase unless it's a HIERARCH card errs.append(self.run_option( option, err_text='Card keyword {!r} is not upper case.'.format( keyword), fix_text=fix_text, fix=self._fix_keyword)) keyword = self.keyword if self.field_specifier: keyword = keyword.split('.', 1)[0] if not self._keywd_FSC_RE.match(keyword): errs.append(self.run_option( option, err_text='Illegal keyword name {!r}'.format(keyword), fixable=False)) # verify the value, it may be fixable keyword, valuecomment = self._split() if self.keyword in self._commentary_keywords: # For commentary keywords all that needs to be ensured is that it # contains only printable ASCII characters if not self._ascii_text_re.match(valuecomment): errs.append(self.run_option( option, err_text='Unprintable string {!r}; commentary cards may ' 'only contain printable ASCII characters'.format( valuecomment), fixable=False)) else: m = self._value_FSC_RE.match(valuecomment) if not m: errs.append(self.run_option( option, err_text='Card {!r} is not FITS standard (invalid value ' 'string: {!r}).'.format(self.keyword, valuecomment), fix_text=fix_text, fix=self._fix_value)) # verify the comment (string), it is never fixable m = self._value_NFSC_RE.match(valuecomment) if m is not None: comment = m.group('comm') if comment is not None: if not self._ascii_text_re.match(comment): errs.append(self.run_option( option, err_text=('Unprintable string {!r}; header comments ' 'may only contain printable ASCII ' 'characters'.format(comment)), fixable=False)) return errs def _itersubcards(self): """ If the card image is greater than 80 characters, it should consist of a normal card followed by one or more CONTINUE card. This method returns the subcards that make up this logical card. """ ncards = len(self._image) // Card.length for idx in range(0, Card.length * ncards, Card.length): card = Card.fromstring(self._image[idx:idx + Card.length]) if idx > 0 and card.keyword.upper() != 'CONTINUE': raise VerifyError( 'Long card images must have CONTINUE cards after ' 'the first card.') if not isinstance(card.value, str): raise VerifyError('CONTINUE cards must have string values.') yield card def _int_or_float(s): """ Converts an a string to an int if possible, or to a float. If the string is neither a string or a float a value error is raised. """ if isinstance(s, float): # Already a float so just pass through return s try: return int(s) except (ValueError, TypeError): try: return float(s) except (ValueError, TypeError) as e: raise ValueError(str(e)) def _format_value(value): """ Converts a card value to its appropriate string representation as defined by the FITS format. 
""" # string value should occupies at least 8 columns, unless it is # a null string if isinstance(value, str): if value == '': return "''" else: exp_val_str = value.replace("'", "''") val_str = "'{:8}'".format(exp_val_str) return '{:20}'.format(val_str) # must be before int checking since bool is also int elif isinstance(value, (bool, np.bool_)): return '{:>20}'.format(repr(value)[0]) # T or F elif _is_int(value): return '{:>20d}'.format(value) elif isinstance(value, (float, np.floating)): return '{:>20}'.format(_format_float(value)) elif isinstance(value, (complex, np.complexfloating)): val_str = '({}, {})'.format(_format_float(value.real), _format_float(value.imag)) return '{:>20}'.format(val_str) elif isinstance(value, Undefined): return '' else: return '' def _format_float(value): """Format a floating number to make sure it gets the decimal point.""" value_str = '{:.16G}'.format(value) if '.' not in value_str and 'E' not in value_str: value_str += '.0' elif 'E' in value_str: # On some Windows builds of Python (and possibly other platforms?) the # exponent is zero-padded out to, it seems, three digits. Normalize # the format to pad only to two digits. significand, exponent = value_str.split('E') if exponent[0] in ('+', '-'): sign = exponent[0] exponent = exponent[1:] else: sign = '' value_str = '{}E{}{:02d}'.format(significand, sign, int(exponent)) # Limit the value string to at most 20 characters. str_len = len(value_str) if str_len > 20: idx = value_str.find('E') if idx < 0: value_str = value_str[:20] else: value_str = value_str[:20 - (str_len - idx)] + value_str[idx:] return value_str def _pad(input): """Pad blank space to the input string to be multiple of 80.""" _len = len(input) if _len == Card.length: return input elif _len > Card.length: strlen = _len % Card.length if strlen == 0: return input else: return input + ' ' * (Card.length - strlen) # minimum length is 80 else: strlen = _len % Card.length return input + ' ' * (Card.length - strlen)
1618a7f018fa0bc666e1251dbe7be4782471eb9c2bfbaf66101d6c3d0c24c177
# Licensed under a 3-clause BSD style license - see PYFITS.rst import operator import warnings from ...utils import indent from ...utils.exceptions import AstropyUserWarning class VerifyError(Exception): """ Verify exception class. """ class VerifyWarning(AstropyUserWarning): """ Verify warning class. """ VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix', 'fix+ignore', 'fix+warn', 'fix+exception', 'silentfix+ignore', 'silentfix+warn', 'silentfix+exception'] class _Verify: """ Shared methods for verification. """ def run_option(self, option='warn', err_text='', fix_text='Fixed.', fix=None, fixable=True): """ Execute the verification with selected option. """ text = err_text if option in ['warn', 'exception']: fixable = False # fix the value elif not fixable: text = 'Unfixable error: {}'.format(text) else: if fix: fix() text += ' ' + fix_text return (fixable, text) def verify(self, option='warn'): """ Verify all values in the instance. Parameters ---------- option : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. """ opt = option.lower() if opt not in VERIFY_OPTIONS: raise ValueError('Option {!r} not recognized.'.format(option)) if opt == 'ignore': return errs = self._verify(opt) # Break the verify option into separate options related to reporting of # errors, and fixing of fixable errors if '+' in opt: fix_opt, report_opt = opt.split('+') elif opt in ['fix', 'silentfix']: # The original default behavior for 'fix' and 'silentfix' was to # raise an exception for unfixable errors fix_opt, report_opt = opt, 'exception' else: fix_opt, report_opt = None, opt if fix_opt == 'silentfix' and report_opt == 'ignore': # Fixable errors were fixed, but don't report anything return if fix_opt == 'silentfix': # Don't print out fixable issues; the first element of each verify # item is a boolean indicating whether or not the issue was fixable line_filter = lambda x: not x[0] elif fix_opt == 'fix' and report_opt == 'ignore': # Don't print *unfixable* issues, but do print fixed issues; this # is probably not very useful but the option exists for # completeness line_filter = operator.itemgetter(0) else: line_filter = None unfixable = False messages = [] for fixable, message in errs.iter_lines(filter=line_filter): if fixable is not None: unfixable = not fixable messages.append(message) if messages: messages.insert(0, 'Verification reported errors:') messages.append('Note: astropy.io.fits uses zero-based indexing.\n') if fix_opt == 'silentfix' and not unfixable: return elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable): for line in messages: warnings.warn(line, VerifyWarning) else: raise VerifyError('\n' + '\n'.join(messages)) class _ErrList(list): """ Verification errors list class. It has a nested list structure constructed by error messages generated by verifications at different class levels. """ def __init__(self, val=(), unit='Element'): super().__init__(val) self.unit = unit def __str__(self): return '\n'.join(item[1] for item in self.iter_lines()) def iter_lines(self, filter=None, shift=0): """ Iterate the nested structure as a list of strings with appropriate indentations for each level of structure. 
""" element = 0 # go through the list twice, first time print out all top level # messages for item in self: if not isinstance(item, _ErrList): if filter is None or filter(item): yield item[0], indent(item[1], shift=shift) # second time go through the next level items, each of the next level # must present, even it has nothing. for item in self: if isinstance(item, _ErrList): next_lines = item.iter_lines(filter=filter, shift=shift + 1) try: first_line = next(next_lines) except StopIteration: first_line = None if first_line is not None: if self.unit: # This line is sort of a header for the next level in # the hierarchy yield None, indent('{} {}:'.format(self.unit, element), shift=shift) yield first_line for line in next_lines: yield line element += 1
8ffeb67a34cb15d1f308dc3a261ef9fed8adaa9b1b846022e84c13d6a75c61cb
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. cds.py: Classes to read CDS / Vizier table format :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import fnmatch import itertools import re import os from contextlib import suppress from . import core from . import fixedwidth __doctest_skip__ = ['*'] class CdsHeader(core.BaseHeader): col_type_map = {'e': core.FloatType, 'f': core.FloatType, 'i': core.IntType, 'a': core.StrType} 'The ReadMe file to construct header from.' readme = None def get_type_map_key(self, col): match = re.match(r'\d*(\S)', col.raw_type.lower()) if not match: raise ValueError('Unrecognized CDS format "{}" for column "{}"'.format( col.raw_type, col.name)) return match.group(1) def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines`` for a CDS header. Parameters ---------- lines : list List of table lines """ # Read header block for the table ``self.data.table_name`` from the read # me file ``self.readme``. if self.readme and self.data.table_name: in_header = False readme_inputter = core.BaseInputter() f = readme_inputter.get_lines(self.readme) # Header info is not in data lines but in a separate file. lines = [] comment_lines = 0 for line in f: line = line.strip() if in_header: lines.append(line) if line.startswith(('------', '=======')): comment_lines += 1 if comment_lines == 3: break else: match = re.match(r'Byte-by-byte Description of file: (?P<name>.+)$', line, re.IGNORECASE) if match: # Split 'name' in case in contains multiple files names = [s for s in re.split('[, ]+', match.group('name')) if s] # Iterate on names to find if one matches the tablename # including wildcards. for pattern in names: if fnmatch.fnmatch(self.data.table_name, pattern): in_header = True lines.append(line) break else: raise core.InconsistentTableError("Can't find table {0} in {1}".format( self.data.table_name, self.readme)) found_line = False for i_col_def, line in enumerate(lines): if re.match(r'Byte-by-byte Description', line, re.IGNORECASE): found_line = True elif found_line: # First line after list of file descriptions i_col_def -= 1 # Set i_col_def to last description line break re_col_def = re.compile(r"""\s* (?P<start> \d+ \s* -)? \s* (?P<end> \d+) \s+ (?P<format> [\w.]+) \s+ (?P<units> \S+) \s+ (?P<name> \S+) (\s+ (?P<descr> \S.*))?""", re.VERBOSE) cols = [] for line in itertools.islice(lines, i_col_def+4, None): if line.startswith(('------', '=======')): break match = re_col_def.match(line) if match: col = core.Column(name=match.group('name')) col.start = int(re.sub(r'[-\s]', '', match.group('start') or match.group('end'))) - 1 col.end = int(match.group('end')) col.unit = match.group('units') if col.unit == '---': col.unit = None # "---" is the marker for no unit in CDS table col.description = (match.group('descr') or '').strip() col.raw_type = match.group('format') col.type = self.get_col_type(col) match = re.match( r'\? (?P<equal> =)? 
(?P<nullval> \S*) (\s+ (?P<descriptiontext> \S.*))?', col.description, re.VERBOSE) if match: col.description = (match.group('descriptiontext') or '').strip() if issubclass(col.type, core.FloatType): fillval = 'nan' else: fillval = '0' if match.group('nullval') == '-': col.null = '---' # CDS tables can use -, --, ---, or ---- to mark missing values # see https://github.com/astropy/astropy/issues/1335 for i in [1, 2, 3, 4]: self.data.fill_values.append(('-'*i, fillval, col.name)) else: col.null = match.group('nullval') self.data.fill_values.append((col.null, fillval, col.name)) cols.append(col) else: # could be a continuation of the previous col's description if cols: cols[-1].description += line.strip() else: raise ValueError('Line "{}" not parsable as CDS header'.format(line)) self.names = [x.name for x in cols] self.cols = cols class CdsData(core.BaseData): """CDS table data reader """ splitter_class = fixedwidth.FixedWidthSplitter def process_lines(self, lines): """Skip over CDS header by finding the last section delimiter""" # If the header has a ReadMe and data has a filename # then no need to skip, as the data lines do not have header # info. The ``read`` method adds the table_name to the ``data`` # attribute. if self.header.readme and self.table_name: return lines i_sections = [i for i, x in enumerate(lines) if x.startswith(('------', '======='))] if not i_sections: raise core.InconsistentTableError('No CDS section delimiter found') return lines[i_sections[-1]+1:] class Cds(core.BaseReader): """Read a CDS format table. See http://vizier.u-strasbg.fr/doc/catstd.htx. Example:: Table: Table name here = ============================================================================== Catalog reference paper Bibliography info here ================================================================================ ADC_Keywords: Keyword ; Another keyword ; etc Description: Catalog description here. ================================================================================ Byte-by-byte Description of file: datafile3.txt -------------------------------------------------------------------------------- Bytes Format Units Label Explanations -------------------------------------------------------------------------------- 1- 3 I3 --- Index Running identification number 5- 6 I2 h RAh Hour of Right Ascension (J2000) 8- 9 I2 min RAm Minute of Right Ascension (J2000) 11- 15 F5.2 s RAs Second of Right Ascension (J2000) -------------------------------------------------------------------------------- Note (1): A CDS file can contain sections with various metadata. Notes can be multiple lines. Note (2): Another note. -------------------------------------------------------------------------------- 1 03 28 39.09 2 04 18 24.11 **About parsing the CDS format** The CDS format consists of a table description and the table data. These can be in separate files as a ``ReadMe`` file plus data file(s), or combined in a single file. Different subsections within the description are separated by lines of dashes or equal signs ("------" or "======"). The table which specifies the column information must be preceded by a line starting with "Byte-by-byte Description of file:". In the case where the table description is combined with the data values, the data must be in the last section and must be preceded by a section delimiter line (dashes or equal signs only). **Basic usage** Use the ``ascii.read()`` function as normal, with an optional ``readme`` parameter indicating the CDS ReadMe file. 
If not supplied it is assumed that the header information is at the top of the given table. Examples:: >>> from astropy.io import ascii >>> table = ascii.read("t/cds.dat") >>> table = ascii.read("t/vizier/table1.dat", readme="t/vizier/ReadMe") >>> table = ascii.read("t/cds/multi/lhs2065.dat", readme="t/cds/multi/ReadMe") >>> table = ascii.read("t/cds/glob/lmxbrefs.dat", readme="t/cds/glob/ReadMe") The table name and the CDS ReadMe file can be entered as URLs. This can be used to directly load tables from the Internet. For example, Vizier tables from the CDS:: >>> table = ascii.read("ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/snrs.dat", ... readme="ftp://cdsarc.u-strasbg.fr/pub/cats/VII/253/ReadMe") If the header (ReadMe) and data are stored in a single file and there is content between the header and the data (for instance Notes), then the parsing process may fail. In this case you can instruct the reader to guess the actual start of the data by supplying ``data_start='guess'`` in the call to the ``ascii.read()`` function. You should verify that the output data table matches expectation based on the input CDS file. **Using a reader object** When ``Cds`` reader object is created with a ``readme`` parameter passed to it at initialization, then when the ``read`` method is executed with a table filename, the header information for the specified table is taken from the ``readme`` file. An ``InconsistentTableError`` is raised if the ``readme`` file does not have header information for the given table. >>> readme = "t/vizier/ReadMe" >>> r = ascii.get_reader(ascii.Cds, readme=readme) >>> table = r.read("t/vizier/table1.dat") >>> # table5.dat has the same ReadMe file >>> table = r.read("t/vizier/table5.dat") If no ``readme`` parameter is specified, then the header information is assumed to be at the top of the given table. >>> r = ascii.get_reader(ascii.Cds) >>> table = r.read("t/cds.dat") >>> #The following gives InconsistentTableError, since no >>> #readme file was given and table1.dat does not have a header. >>> table = r.read("t/vizier/table1.dat") Traceback (most recent call last): ... InconsistentTableError: No CDS section delimiter found Caveats: * The Units and Explanations are available in the column ``unit`` and ``description`` attributes, respectively. * The other metadata defined by this format is not available in the output table. """ _format_name = 'cds' _io_registry_format_aliases = ['cds'] _io_registry_can_write = False _description = 'CDS format table' data_class = CdsData header_class = CdsHeader def __init__(self, readme=None): super().__init__() self.header.readme = readme def write(self, table=None): """Not available for the Cds class (raises NotImplementedError)""" raise NotImplementedError def read(self, table): # If the read kwarg `data_start` is 'guess' then the table may have extraneous # lines between the end of the header and the beginning of data. if self.data.start_line == 'guess': # Replicate the first part of BaseReader.read up to the point where # the table lines are initially read in. with suppress(TypeError): # For strings only if os.linesep not in table + '': self.data.table_name = os.path.basename(table) self.data.header = self.header self.header.data = self.data # Get a list of the lines (rows) in the table lines = self.inputter.get_lines(table) # Now try increasing data.start_line by one until the table reads successfully. # For efficiency use the in-memory list of lines instead of `table`, which # could be a file. 
            for data_start in range(len(lines)):
                self.data.start_line = data_start

                with suppress(Exception):
                    table = super().read(lines)
                    return table

        else:
            return super().read(table)
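
# A hedged sketch of the data_start='guess' path implemented in Cds.read()
# above, using an inline table; the column layout below is illustrative and
# much smaller than a real CDS/Vizier file.
if __name__ == '__main__':
    from astropy.io import ascii

    lines = [
        "Byte-by-byte Description of file: example.dat",
        "--------------------------------------------------------------------------------",
        "   Bytes Format Units  Label     Explanations",
        "--------------------------------------------------------------------------------",
        "   1-  3  I3    ---    Index     Running identification number",
        "   5-  9  F5.2  mag    Vmag      V magnitude",
        "--------------------------------------------------------------------------------",
        "Note (1): free-form notes may sit between the header and the data.",
        "--------------------------------------------------------------------------------",
        "  1 12.34",
        "  2 11.02",
    ]
    # With data_start='guess', Cds.read() retries super().read() with
    # increasing start lines until the fixed-width data section parses.
    table = ascii.read(lines, format='cds', data_start='guess')
    print(table)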
8dbf451722b8f75ddb21ee7828f214ae61b780e147f21277977a398d9f1369d2
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. basic.py: Basic table read / write functionality for simple character delimited files with various options for column header definition. :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import re from . import core class BasicHeader(core.BaseHeader): """ Basic table Header Reader Set a few defaults for common ascii table formats (start at line 0, comments begin with ``#`` and possibly white space) """ start_line = 0 comment = r'\s*#' write_comment = '# ' class BasicData(core.BaseData): """ Basic table Data Reader Set a few defaults for common ascii table formats (start at line 1, comments begin with ``#`` and possibly white space) """ start_line = 1 comment = r'\s*#' write_comment = '# ' class Basic(core.BaseReader): r""" Read a character-delimited table with a single header line at the top followed by data lines to the end of the table. Lines beginning with # as the first non-whitespace character are comments. This reader is highly configurable. :: rdr = ascii.get_reader(Reader=ascii.Basic) rdr.header.splitter.delimiter = ' ' rdr.data.splitter.delimiter = ' ' rdr.header.start_line = 0 rdr.data.start_line = 1 rdr.data.end_line = None rdr.header.comment = r'\s*#' rdr.data.comment = r'\s*#' Example table:: # Column definition is the first uncommented line # Default delimiter is the space character. apples oranges pears # Data starts after the header column definition, blank lines ignored 1 2 3 4 5 6 """ _format_name = 'basic' _description = 'Basic table with custom delimiters' header_class = BasicHeader data_class = BasicData class NoHeaderHeader(BasicHeader): """ Reader for table header without a header Set the start of header line number to `None`, which tells the basic reader there is no header line. """ start_line = None class NoHeaderData(BasicData): """ Reader for table data without a header Data starts at first uncommented line since there is no header line. """ start_line = 0 class NoHeader(Basic): """ Read a table with no header line. Columns are autonamed using header.auto_format which defaults to "col%d". Otherwise this reader the same as the :class:`Basic` class from which it is derived. Example:: # Table data 1 2 "hello there" 3 4 world """ _format_name = 'no_header' _description = 'Basic table with no headers' header_class = NoHeaderHeader data_class = NoHeaderData class CommentedHeaderHeader(BasicHeader): """ Header class for which the column definition line starts with the comment character. See the :class:`CommentedHeader` class for an example. """ def process_lines(self, lines): """ Return only lines that start with the comment regexp. For these lines strip out the matching characters. """ re_comment = re.compile(self.comment) for line in lines: match = re_comment.match(line) if match: yield line[match.end():] def write(self, lines): lines.append(self.write_comment + self.splitter.join(self.colnames)) class CommentedHeader(Basic): """ Read a file where the column names are given in a line that begins with the header comment character. ``header_start`` can be used to specify the line index of column names, and it can be a negative index (for example -1 for the last commented line). 
The default delimiter is the <space> character.:: # col1 col2 col3 # Comment line 1 2 3 4 5 6 """ _format_name = 'commented_header' _description = 'Column names in a commented line' header_class = CommentedHeaderHeader data_class = NoHeaderData def read(self, table): """ Read input data (file-like object, filename, list of strings, or single string) into a Table and return the result. """ out = super().read(table) # Strip off first comment since this is the header line for # commented_header format. if 'comments' in out.meta: out.meta['comments'] = out.meta['comments'][1:] if not out.meta['comments']: del out.meta['comments'] return out def write_header(self, lines, meta): """ Write comment lines after, rather than before, the header. """ self.header.write(lines) self.header.write_comments(lines, meta) class TabHeaderSplitter(core.DefaultSplitter): """Split lines on tab and do not remove whitespace""" delimiter = '\t' process_line = None class TabDataSplitter(TabHeaderSplitter): """ Don't strip data value whitespace since that is significant in TSV tables """ process_val = None skipinitialspace = False class TabHeader(BasicHeader): """ Reader for header of tables with tab separated header """ splitter_class = TabHeaderSplitter class TabData(BasicData): """ Reader for data of tables with tab separated data """ splitter_class = TabDataSplitter class Tab(Basic): """ Read a tab-separated file. Unlike the :class:`Basic` reader, whitespace is not stripped from the beginning and end of either lines or individual column values. Example:: col1 <tab> col2 <tab> col3 # Comment line 1 <tab> 2 <tab> 5 """ _format_name = 'tab' _description = 'Basic table with tab-separated values' header_class = TabHeader data_class = TabData class CsvSplitter(core.DefaultSplitter): """ Split on comma for CSV (comma-separated-value) tables """ delimiter = ',' class CsvHeader(BasicHeader): """ Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter` """ splitter_class = CsvSplitter comment = None write_comment = None class CsvData(BasicData): """ Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter` """ splitter_class = CsvSplitter fill_values = [(core.masked, '')] comment = None write_comment = None class Csv(Basic): """ Read a CSV (comma-separated-values) file. Example:: num,ra,dec,radius,mag 1,32.23222,10.1211,0.8,18.1 2,38.12321,-88.1321,2.2,17.0 Plain csv (comma separated value) files typically contain as many entries as there are columns on each line. In contrast, common spreadsheet editors stop writing if all remaining cells on a line are empty, which can lead to lines where the rightmost entries are missing. This Reader can deal with such files. Masked values (indicated by an empty '' field value when reading) are written out in the same way with an empty ('') field. This is different from the typical default for `astropy.io.ascii` in which missing values are indicated by ``--``. Example:: num,ra,dec,radius,mag 1,32.23222,10.1211 2,38.12321,-88.1321,2.2,17.0 """ _format_name = 'csv' _io_registry_can_write = True _description = 'Comma-separated-values' header_class = CsvHeader data_class = CsvData def inconsistent_handler(self, str_vals, ncols): """ Adjust row if it is too short. If a data row is shorter than the header, add empty values to make it the right length. Note that this will *not* be called if the row already matches the header. Parameters ---------- str_vals : list A list of value strings from the current row of the table. 
ncols : int The expected number of entries from the table header. Returns ------- str_vals : list List of strings to be parsed into data entries in the output table. """ if len(str_vals) < ncols: str_vals.extend((ncols - len(str_vals)) * ['']) return str_vals class RdbHeader(TabHeader): """ Header for RDB tables """ col_type_map = {'n': core.NumType, 's': core.StrType} def get_type_map_key(self, col): return col.raw_type[-1] def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines``. This is a specialized get_cols for the RDB type: Line 0: RDB col names Line 1: RDB col definitions Line 2+: RDB data rows Parameters ---------- lines : list List of table lines Returns ------- None """ header_lines = self.process_lines(lines) # this is a generator header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))] if len(header_vals_list) != 2: raise ValueError('RDB header requires 2 lines') self.names, raw_types = header_vals_list if len(self.names) != len(raw_types): raise ValueError('RDB header mismatch between number of column names and column types') if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in raw_types): raise ValueError('RDB types definitions do not all match [num](N|S): {}'.format(raw_types)) self._set_cols_from_names() for col, raw_type in zip(self.cols, raw_types): col.raw_type = raw_type col.type = self.get_col_type(col) def write(self, lines): lines.append(self.splitter.join(self.colnames)) rdb_types = [] for col in self.cols: # Check if dtype.kind is string or unicode. See help(np.core.numerictypes) rdb_type = 'S' if col.info.dtype.kind in ('S', 'U') else 'N' rdb_types.append(rdb_type) lines.append(self.splitter.join(rdb_types)) class RdbData(TabData): """ Data reader for RDB data. Starts reading at line 2. """ start_line = 2 class Rdb(Tab): """ Read a tab-separated file with an extra line after the column definition line. The RDB format meets this definition. Example:: col1 <tab> col2 <tab> col3 N <tab> S <tab> N 1 <tab> 2 <tab> 5 In this reader the second line is just ignored. """ _format_name = 'rdb' _io_registry_format_aliases = ['rdb'] _io_registry_suffix = '.rdb' _description = 'Tab-separated with a type definition header line' header_class = RdbHeader data_class = RdbData
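
# A brief, hedged sketch of the reader classes defined above, driven through
# ascii.read(); the inline tables are illustrative.
if __name__ == '__main__':
    from astropy.io import ascii

    # Basic: one header line, then data; '#' lines are comments.
    print(ascii.read(['# a comment',
                      'apples oranges pears',
                      '1 2 3',
                      '4 5 6'], format='basic'))

    # CommentedHeader: the column names live in a commented line.
    print(ascii.read(['# col1 col2 col3',
                      '1 2 3',
                      '4 5 6'], format='commented_header'))

    # Csv: short rows are padded with masked values by inconsistent_handler().
    print(ascii.read(['num,ra,dec,radius,mag',
                      '1,32.23222,10.1211',
                      '2,38.12321,-88.1321,2.2,17.0'], format='csv'))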
1a8b94f4ec762baa15860b9b784229a3162c629c1a4bf3b0d7381f04a3426a26
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file connects the readers/writers to the astropy.table.Table class import re import functools from .. import registry as io_registry from ...table import Table __all__ = [] # Generic # ======= def read_asciitable(filename, **kwargs): from .ui import read return read(filename, **kwargs) io_registry.register_reader('ascii', Table, read_asciitable) def write_asciitable(table, filename, **kwargs): from .ui import write return write(table, filename, **kwargs) io_registry.register_writer('ascii', Table, write_asciitable) def io_read(format, filename, **kwargs): from .ui import read format = re.sub(r'^ascii\.', '', format) return read(filename, format=format, **kwargs) def io_write(format, table, filename, **kwargs): from .ui import write format = re.sub(r'^ascii\.', '', format) return write(table, filename, format=format, **kwargs) def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs): return filepath is not None and filepath.endswith(suffix) def _get_connectors_table(): from .core import FORMAT_CLASSES rows = [] rows.append(('ascii', '', 'Yes', 'ASCII table in any supported format (uses guessing)')) for format in sorted(FORMAT_CLASSES): cls = FORMAT_CLASSES[format] io_format = 'ascii.' + cls._format_name description = getattr(cls, '_description', '') class_link = ':class:`~{0}.{1}`'.format(cls.__module__, cls.__name__) suffix = getattr(cls, '_io_registry_suffix', '') can_write = 'Yes' if getattr(cls, '_io_registry_can_write', True) else '' rows.append((io_format, suffix, can_write, '{0}: {1}'.format(class_link, description))) out = Table(list(zip(*rows)), names=('Format', 'Suffix', 'Write', 'Description')) for colname in ('Format', 'Description'): width = max(len(x) for x in out[colname]) out[colname].format = '%-{0}s'.format(width) return out # Specific # ======== def read_csv(filename, **kwargs): from .ui import read kwargs['format'] = 'csv' return read(filename, **kwargs) def write_csv(table, filename, **kwargs): from .ui import write kwargs['format'] = 'csv' return write(table, filename, **kwargs) csv_identify = functools.partial(io_identify, '.csv') io_registry.register_reader('csv', Table, read_csv) io_registry.register_writer('csv', Table, write_csv) io_registry.register_identifier('csv', Table, csv_identify)
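
# A hedged sketch of what the registrations above enable: Table.read() and
# Table.write() dispatch to astropy.io.ascii via the 'ascii' and 'csv' registry
# entries, and '.csv' filenames are identified automatically. The file name
# 'example.csv' is hypothetical.
if __name__ == '__main__':
    from astropy.table import Table

    t = Table({'num': [1, 2], 'mag': [18.1, 17.0]})
    t.write('example.csv', format='csv', overwrite=True)  # uses write_csv above

    print(Table.read('example.csv'))                  # csv_identify matches '.csv'
    print(Table.read('example.csv', format='ascii'))  # read_asciitable, with guessing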
6d9ded624ba6b5fa200aa69a8e582f11ae4f9a4af3c88dd34e1b22a1662ec4c7
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ An extensible ASCII table reader and writer. core.py: Core base classes and functions for reading and writing tables. :Copyright: Smithsonian Astrophysical Observatory (2010) :Author: Tom Aldcroft ([email protected]) """ import copy import csv import functools import itertools import operator import os import re import warnings from collections import OrderedDict from contextlib import suppress from io import StringIO import numpy from ...utils.exceptions import AstropyWarning from ...table import Table from ...utils.data import get_readable_fileobj from . import connect # Global dictionary mapping format arg to the corresponding Reader class FORMAT_CLASSES = {} # Similar dictionary for fast readers FAST_CLASSES = {} class CsvWriter: """ Internal class to replace the csv writer ``writerow`` and ``writerows`` functions so that in the case of ``delimiter=' '`` and ``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty fields (when value == ''). This changes the API slightly in that the writerow() and writerows() methods return the output written string instead of the length of that string. Examples -------- >>> from astropy.io.ascii.core import CsvWriter >>> writer = CsvWriter(delimiter=' ') >>> print(writer.writerow(['hello', '', 'world'])) hello "" world """ # Random 16-character string that gets injected instead of any # empty fields and is then replaced post-write with doubled-quotechar. # Created with: # ''.join(random.choice(string.printable[:90]) for _ in range(16)) replace_sentinel = '2b=48Av%0-V3p>bX' def __init__(self, csvfile=None, **kwargs): self.csvfile = csvfile # Temporary StringIO for catching the real csv.writer() object output self.temp_out = StringIO() self.writer = csv.writer(self.temp_out, **kwargs) dialect = self.writer.dialect self.quotechar2 = dialect.quotechar * 2 self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ') def writerow(self, values): """ Similar to csv.writer.writerow but with the custom quoting behavior. Returns the written string instead of the length of that string. """ has_empty = False # If QUOTE_MINIMAL and space-delimited then replace empty fields with # the sentinel value. if self.quote_empty: for i, value in enumerate(values): if value == '': has_empty = True values[i] = self.replace_sentinel return self._writerow(self.writer.writerow, values, has_empty) def writerows(self, values_list): """ Similar to csv.writer.writerows but with the custom quoting behavior. Returns the written string instead of the length of that string. """ has_empty = False # If QUOTE_MINIMAL and space-delimited then replace empty fields with # the sentinel value. if self.quote_empty: for values in values_list: for i, value in enumerate(values): if value == '': has_empty = True values[i] = self.replace_sentinel return self._writerow(self.writer.writerows, values_list, has_empty) def _writerow(self, writerow_func, values, has_empty): """ Call ``writerow_func`` (either writerow or writerows) with ``values``. If it has empty fields that have been replaced then change those sentinel strings back to quoted empty strings, e.g. ``""``. """ # Clear the temporary StringIO buffer that self.writer writes into and # then call the real csv.writer().writerow or writerows with values. 
self.temp_out.seek(0) self.temp_out.truncate() writerow_func(values) row_string = self.temp_out.getvalue() if self.quote_empty and has_empty: row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string) # self.csvfile is defined then write the output. In practice the pure # Python writer calls with csvfile=None, while the fast writer calls with # a file-like object. if self.csvfile: self.csvfile.write(row_string) return row_string class MaskedConstant(numpy.ma.core.MaskedConstant): """A trivial extension of numpy.ma.masked We want to be able to put the generic term ``masked`` into a dictionary. The constant ``numpy.ma.masked`` is not hashable (see https://github.com/numpy/numpy/issues/4660), so we need to extend it here with a hash value. """ def __hash__(self): '''All instances of this class shall have the same hash.''' # Any large number will do. return 1234567890 masked = MaskedConstant() class InconsistentTableError(ValueError): """ Indicates that an input table is inconsistent in some way. The default behavior of ``BaseReader`` is to throw an instance of this class if a data row doesn't match the header. """ class OptionalTableImportError(ImportError): """ Indicates that a dependency for table reading is not present. An instance of this class is raised whenever an optional reader with certain required dependencies cannot operate because of an ImportError. """ class ParameterError(NotImplementedError): """ Indicates that a reader cannot handle a passed parameter. The C-based fast readers in ``io.ascii`` raise an instance of this error class upon encountering a parameter that the C engine cannot handle. """ class FastOptionsError(NotImplementedError): """ Indicates that one of the specified options for fast reading is invalid. """ class NoType: """ Superclass for ``StrType`` and ``NumType`` classes. This class is the default type of ``Column`` and provides a base class for other data types. """ class StrType(NoType): """ Indicates that a column consists of text data. """ class NumType(NoType): """ Indicates that a column consists of numerical data. """ class FloatType(NumType): """ Describes floating-point data. """ class BoolType(NoType): """ Describes boolean data. """ class IntType(NumType): """ Describes integer data. """ class AllType(StrType, FloatType, IntType): """ Subclass of all other data types. This type is returned by ``convert_numpy`` if the given numpy type does not match ``StrType``, ``FloatType``, or ``IntType``. """ class Column: """Table column. The key attributes of a Column object are: * **name** : column name * **type** : column type (NoType, StrType, NumType, FloatType, IntType) * **dtype** : numpy dtype (optional, overrides **type** if set) * **str_vals** : list of column values as strings * **data** : list of converted column values """ def __init__(self, name): self.name = name self.type = NoType # Generic type (Int, Float, Str etc) self.dtype = None # Numpy dtype if available self.str_vals = [] self.fill_values = {} class BaseInputter: """ Get the lines from the table input and return a list of lines. """ encoding = None """Encoding used to read the file""" def get_lines(self, table): """ Get the lines from the ``table`` input. 
The input table can be one of: * File name * String (newline separated) with all header and data lines (must have at least 2 lines) * File-like object with read() method * List of strings Parameters ---------- table : str, file_like, list Can be either a file name, string (newline separated) with all header and data lines (must have at least 2 lines), a file-like object with a ``read()`` method, or a list of strings. Returns ------- lines : list List of lines """ try: if (hasattr(table, 'read') or ('\n' not in table + '' and '\r' not in table + '')): with get_readable_fileobj(table, encoding=self.encoding) as fileobj: table = fileobj.read() lines = table.splitlines() except TypeError: try: # See if table supports indexing, slicing, and iteration table[0] table[0:1] iter(table) lines = table except TypeError: raise TypeError( 'Input "table" must be a string (filename or data) or an iterable') return self.process_lines(lines) def process_lines(self, lines): """Process lines for subsequent use. In the default case do nothing. This routine is not generally intended for removing comment lines or stripping whitespace. These are done (if needed) in the header and data line processing. Override this method if something more has to be done to convert raw input lines to the table rows. For example the ContinuationLinesInputter derived class accounts for continuation characters if a row is split into lines.""" return lines class BaseSplitter: """ Base splitter that uses python's split method to do the work. This does not handle quoted values. A key feature is the formulation of __call__ as a generator that returns a list of the split line values at each iteration. There are two methods that are intended to be overridden, first ``process_line()`` to do pre-processing on each input line before splitting and ``process_val()`` to do post-processing on each split string value. By default these apply the string ``strip()`` function. These can be set to another function via the instance attribute or be disabled entirely, for example:: reader.header.splitter.process_val = lambda x: x.lstrip() reader.data.splitter.process_val = None """ delimiter = None """ one-character string used to separate fields """ def process_line(self, line): """Remove whitespace at the beginning or end of line. This is especially useful for whitespace-delimited files to prevent spurious columns at the beginning or end.""" return line.strip() def process_val(self, val): """Remove whitespace at the beginning or end of value.""" return val.strip() def __call__(self, lines): if self.process_line: lines = (self.process_line(x) for x in lines) for line in lines: vals = line.split(self.delimiter) if self.process_val: yield [self.process_val(x) for x in vals] else: yield vals def join(self, vals): if self.delimiter is None: delimiter = ' ' else: delimiter = self.delimiter return delimiter.join(str(x) for x in vals) class DefaultSplitter(BaseSplitter): """Default class to split strings into columns using python csv. The class attributes are taken from the csv Dialect class. Typical usage:: # lines = .. splitter = ascii.DefaultSplitter() for col_vals in splitter(lines): for col_val in col_vals: ... """ delimiter = ' ' """ one-character string used to separate fields. 
""" quotechar = '"' """ control how instances of *quotechar* in a field are quoted """ doublequote = True """ character to remove special meaning from following character """ escapechar = None """ one-character stringto quote fields containing special characters """ quoting = csv.QUOTE_MINIMAL """ control when quotes are recognised by the reader """ skipinitialspace = True """ ignore whitespace immediately following the delimiter """ csv_writer = None csv_writer_out = StringIO() def process_line(self, line): """Remove whitespace at the beginning or end of line. This is especially useful for whitespace-delimited files to prevent spurious columns at the beginning or end. If splitting on whitespace then replace unquoted tabs with space first""" if self.delimiter == r'\s': line = _replace_tab_with_space(line, self.escapechar, self.quotechar) return line.strip() def __call__(self, lines): """Return an iterator over the table ``lines``, where each iterator output is a list of the split line values. Parameters ---------- lines : list List of table lines Returns ------- lines : iterator """ if self.process_line: lines = [self.process_line(x) for x in lines] delimiter = ' ' if self.delimiter == r'\s' else self.delimiter csv_reader = csv.reader(lines, delimiter=delimiter, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, quoting=self.quoting, skipinitialspace=self.skipinitialspace ) for vals in csv_reader: if self.process_val: yield [self.process_val(x) for x in vals] else: yield vals def join(self, vals): delimiter = ' ' if self.delimiter is None else str(self.delimiter) if self.csv_writer is None: self.csv_writer = CsvWriter(delimiter=delimiter, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, quoting=self.quoting, lineterminator='') if self.process_val: vals = [self.process_val(x) for x in vals] out = self.csv_writer.writerow(vals) return out def _replace_tab_with_space(line, escapechar, quotechar): """Replace tabs with spaces in given string, preserving quoted substrings Parameters ---------- line : str String containing tabs to be replaced with spaces. escapechar : str Character in ``line`` used to escape special characters. quotechar : str Character in ``line`` indicating the start/end of a substring. Returns ------- line : str A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings. """ newline = [] in_quote = False lastchar = 'NONE' for char in line: if char == quotechar and lastchar != escapechar: in_quote = not in_quote if char == '\t' and not in_quote: char = ' ' lastchar = char newline.append(char) return ''.join(newline) def _get_line_index(line_or_func, lines): """Return the appropriate line index, depending on ``line_or_func`` which can be either a function, a positive or negative int, or None. 
""" if hasattr(line_or_func, '__call__'): return line_or_func(lines) elif line_or_func: if line_or_func >= 0: return line_or_func else: n_lines = sum(1 for line in lines) return n_lines + line_or_func else: return line_or_func class BaseHeader: """ Base table header reader """ auto_format = 'col{}' """ format string for auto-generating column names """ start_line = None """ None, int, or a function of ``lines`` that returns None or int """ comment = None """ regular expression for comment lines """ splitter_class = DefaultSplitter """ Splitter class for splitting data lines into columns """ names = None """ list of names corresponding to each data column """ write_comment = False write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE'] def __init__(self): self.splitter = self.splitter_class() def _set_cols_from_names(self): self.cols = [Column(name=x) for x in self.names] def update_meta(self, lines, meta): """ Extract any table-level metadata, e.g. keywords, comments, column metadata, from the table ``lines`` and update the OrderedDict ``meta`` in place. This base method extracts comment lines and stores them in ``meta`` for output. """ if self.comment: re_comment = re.compile(self.comment) comment_lines = [x for x in lines if re_comment.match(x)] else: comment_lines = [] comment_lines = [re.sub('^' + self.comment, '', x).strip() for x in comment_lines] if comment_lines: meta.setdefault('table', {})['comments'] = comment_lines def get_cols(self, lines): """Initialize the header Column objects from the table ``lines``. Based on the previously set Header attributes find or create the column names. Sets ``self.cols`` with the list of Columns. Parameters ---------- lines : list List of table lines """ start_line = _get_line_index(self.start_line, self.process_lines(lines)) if start_line is None: # No header line so auto-generate names from n_data_cols # Get the data values from the first line of table data to determine n_data_cols try: first_data_vals = next(self.data.get_str_vals()) except StopIteration: raise InconsistentTableError('No data lines found so cannot autogenerate ' 'column names') n_data_cols = len(first_data_vals) self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)] else: for i, line in enumerate(self.process_lines(lines)): if i == start_line: break else: # No header line matching raise ValueError('No header line found in table') self.names = next(self.splitter([line])) self._set_cols_from_names() def process_lines(self, lines): """Generator to yield non-blank and non-comment lines""" if self.comment: re_comment = re.compile(self.comment) # Yield non-comment lines for line in lines: if line.strip() and (not self.comment or not re_comment.match(line)): yield line def write_comments(self, lines, meta): if self.write_comment is not False: for comment in meta.get('comments', []): lines.append(self.write_comment + comment) def write(self, lines): if self.start_line is not None: for i, spacer_line in zip(range(self.start_line), itertools.cycle(self.write_spacer_lines)): lines.append(spacer_line) lines.append(self.splitter.join([x.info.name for x in self.cols])) @property def colnames(self): """Return the column names of the table""" return tuple(col.name if isinstance(col, Column) else col.info.name for col in self.cols) def get_type_map_key(self, col): return col.raw_type def get_col_type(self, col): try: type_map_key = self.get_type_map_key(col) return self.col_type_map[type_map_key.lower()] except KeyError: raise ValueError('Unknown data type ""{}"" for 
column "{}"'.format( col.raw_type, col.name)) def check_column_names(self, names, strict_names, guessing): """ Check column names. This must be done before applying the names transformation so that guessing will fail appropriately if ``names`` is supplied. For instance if the basic reader is given a table with no column header row. Parameters ---------- names : list User-supplied list of column names strict_names : bool Whether to impose extra requirements on names guessing : bool True if this method is being called while guessing the table format """ if strict_names: # Impose strict requirements on column names (normally used in guessing) bads = [" ", ",", "|", "\t", "'", '"'] for name in self.colnames: if (_is_number(name) or len(name) == 0 or name[0] in bads or name[-1] in bads): raise ValueError('Column name {0!r} does not meet strict name requirements' .format(name)) # When guessing require at least two columns if guessing and len(self.colnames) <= 1: raise ValueError('Table format guessing requires at least two columns, got {}' .format(list(self.colnames))) if names is not None and len(names) != len(self.colnames): raise ValueError('Length of names argument ({0}) does not match number' ' of table columns ({1})'.format(len(names), len(self.colnames))) class BaseData: """ Base table data reader. """ start_line = None """ None, int, or a function of ``lines`` that returns None or int """ end_line = None """ None, int, or a function of ``lines`` that returns None or int """ comment = None """ Regular expression for comment lines """ splitter_class = DefaultSplitter """ Splitter class for splitting data lines into columns """ write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE'] fill_include_names = None fill_exclude_names = None fill_values = [(masked, '')] formats = {} def __init__(self): # Need to make sure fill_values list is instance attribute, not class attribute. # On read, this will be overwritten by the default in the ui.read (thus, in # the current implementation there can be no different default for different # Readers). On write, ui.py does not specify a default, so this line here matters. self.fill_values = copy.copy(self.fill_values) self.formats = copy.copy(self.formats) self.splitter = self.splitter_class() def process_lines(self, lines): """ Strip out comment lines and blank lines from list of ``lines`` Parameters ---------- lines : list All lines in table Returns ------- lines : list List of lines """ nonblank_lines = (x for x in lines if x.strip()) if self.comment: re_comment = re.compile(self.comment) return [x for x in nonblank_lines if not re_comment.match(x)] else: return [x for x in nonblank_lines] def get_data_lines(self, lines): """Set the ``data_lines`` attribute to the lines slice comprising the table data values.""" data_lines = self.process_lines(lines) start_line = _get_line_index(self.start_line, data_lines) end_line = _get_line_index(self.end_line, data_lines) if start_line is not None or end_line is not None: self.data_lines = data_lines[slice(start_line, end_line)] else: # Don't copy entire data lines unless necessary self.data_lines = data_lines def get_str_vals(self): """Return a generator that returns a list of column values (as strings) for each data line.""" return self.splitter(self.data_lines) def masks(self, cols): """Set fill value for each column and then apply that fill value In the first step it is evaluated with value from ``fill_values`` applies to which column using ``fill_include_names`` and ``fill_exclude_names``. 
In the second step all replacements are done for the appropriate columns. """ if self.fill_values: self._set_fill_values(cols) self._set_masks(cols) def _set_fill_values(self, cols): """Set the fill values of the individual cols based on fill_values of BaseData fill values has the following form: <fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...) fill_values = <fill_spec> or list of <fill_spec>'s """ if self.fill_values: # when we write tables the columns may be astropy.table.Columns # which don't carry a fill_values by default for col in cols: if not hasattr(col, 'fill_values'): col.fill_values = {} # if input is only one <fill_spec>, then make it a list with suppress(TypeError): self.fill_values[0] + '' self.fill_values = [self.fill_values] # Step 1: Set the default list of columns which are affected by # fill_values colnames = set(self.header.colnames) if self.fill_include_names is not None: colnames.intersection_update(self.fill_include_names) if self.fill_exclude_names is not None: colnames.difference_update(self.fill_exclude_names) # Step 2a: Find out which columns are affected by this tuple # iterate over reversed order, so last condition is set first and # overwritten by earlier conditions for replacement in reversed(self.fill_values): if len(replacement) < 2: raise ValueError("Format of fill_values must be " "(<bad>, <fill>, <optional col1>, ...)") elif len(replacement) == 2: affect_cols = colnames else: affect_cols = replacement[2:] for i, key in ((i, x) for i, x in enumerate(self.header.colnames) if x in affect_cols): cols[i].fill_values[replacement[0]] = str(replacement[1]) def _set_masks(self, cols): """Replace string values in col.str_vals and set masks""" if self.fill_values: for col in (col for col in cols if col.fill_values): col.mask = numpy.zeros(len(col.str_vals), dtype=numpy.bool) for i, str_val in ((i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values): col.str_vals[i] = col.fill_values[str_val] col.mask[i] = True def _replace_vals(self, cols): """Replace string values in col.str_vals""" if self.fill_values: for col in (col for col in cols if col.fill_values): for i, str_val in ((i, x) for i, x in enumerate(col.str_vals) if x in col.fill_values): col.str_vals[i] = col.fill_values[str_val] if masked in col.fill_values and hasattr(col, 'mask'): mask_val = col.fill_values[masked] for i in col.mask.nonzero()[0]: col.str_vals[i] = mask_val def str_vals(self): '''convert all values in table to a list of lists of strings''' self._set_fill_values(self.cols) self._set_col_formats() for col in self.cols: col.str_vals = list(col.info.iter_str_vals()) self._replace_vals(self.cols) return [col.str_vals for col in self.cols] def write(self, lines): if hasattr(self.start_line, '__call__'): raise TypeError('Start_line attribute cannot be callable for write()') else: data_start_line = self.start_line or 0 while len(lines) < data_start_line: lines.append(itertools.cycle(self.write_spacer_lines)) col_str_iters = self.str_vals() for vals in zip(*col_str_iters): lines.append(self.splitter.join(vals)) def _set_col_formats(self): """ """ for col in self.cols: if col.info.name in self.formats: col.info.format = self.formats[col.name] def convert_numpy(numpy_type): """Return a tuple containing a function which converts a list into a numpy array and the type produced by the converter function. Parameters ---------- numpy_type : numpy data-type The numpy type required of an array returned by ``converter``. 
Must be a valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_, e.g. numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float, numpy.float64, numpy.str. Returns ------- (converter, converter_type) : (function, generic data-type) ``converter`` is a function which accepts a list and converts it to a numpy array of type ``numpy_type``. ``converter_type`` tracks the generic data type produced by the converter function. Raises ------ ValueError Raised by ``converter`` if the list elements could not be converted to the required type. """ # Infer converter type from an instance of numpy_type. type_name = numpy.array([], dtype=numpy_type).dtype.name if 'int' in type_name: converter_type = IntType elif 'float' in type_name: converter_type = FloatType elif 'bool' in type_name: converter_type = BoolType elif 'str' in type_name: converter_type = StrType else: converter_type = AllType def bool_converter(vals): """ Convert values "False" and "True" to bools. Raise an exception for any other string values. """ if len(vals) == 0: return numpy.array([], dtype=bool) # Try a smaller subset first for a long array if len(vals) > 10000: svals = numpy.asarray(vals[:1000]) if not numpy.all((svals == 'False') | (svals == 'True')): raise ValueError('bool input strings must be only False or True') vals = numpy.asarray(vals) trues = vals == 'True' falses = vals == 'False' if not numpy.all(trues | falses): raise ValueError('bool input strings must be only False or True') return trues def generic_converter(vals): return numpy.array(vals, numpy_type) converter = bool_converter if converter_type is BoolType else generic_converter return converter, converter_type class BaseOutputter: """Output table as a dict of column objects keyed on column name. The table data are stored as plain python lists within the column objects. """ converters = {} # Derived classes must define default_converters and __call__ @staticmethod def _validate_and_copy(col, converters): """Validate the format for the type converters and then copy those which are valid converters for this column (i.e. converter type is a subclass of col.type)""" converters_out = [] try: for converter in converters: converter_func, converter_type = converter if not issubclass(converter_type, NoType): raise ValueError() if issubclass(converter_type, col.type): converters_out.append((converter_func, converter_type)) except (ValueError, TypeError): raise ValueError('Error: invalid format for converters, see ' 'documentation\n{}'.format(converters)) return converters_out def _convert_vals(self, cols): for col in cols: # If a specific dtype was specified for a column, then use that # to set the defaults, otherwise use the generic defaults. default_converters = ([convert_numpy(col.dtype)] if col.dtype else self.default_converters) # If the user supplied a specific convert then that takes precedence over defaults converters = self.converters.get(col.name, default_converters) col.converters = self._validate_and_copy(col, converters) # Catch the last error in order to provide additional information # in case all attempts at column conversion fail. The initial # value of of last_error will apply if no converters are defined # and the first col.converters[0] access raises IndexError. 
last_err = 'no converters defined' while not hasattr(col, 'data'): try: converter_func, converter_type = col.converters[0] if not issubclass(converter_type, col.type): raise TypeError('converter type does not match column type') col.data = converter_func(col.str_vals) col.type = converter_type except (TypeError, ValueError) as err: col.converters.pop(0) last_err = err except OverflowError as err: # Overflow during conversion (most likely an int that doesn't fit in native C long). # Put string at the top of the converters list for the next while iteration. warnings.warn("OverflowError converting to {0} for column {1}, using string instead." .format(converter_type.__name__, col.name), AstropyWarning) col.converters.insert(0, convert_numpy(numpy.str)) last_err = err except IndexError: raise ValueError('Column {} failed to convert: {}'.format(col.name, last_err)) class TableOutputter(BaseOutputter): """ Output the table as an astropy.table.Table object. """ default_converters = [convert_numpy(numpy.int), convert_numpy(numpy.float), convert_numpy(numpy.str)] def __call__(self, cols, meta): # Sets col.data to numpy array and col.type to io.ascii Type class (e.g. # FloatType) for each col. self._convert_vals(cols) # If there are any values that were filled and tagged with a mask bit then this # will be a masked table. Otherwise use a plain table. masked = any(hasattr(col, 'mask') and numpy.any(col.mask) for col in cols) out = Table([x.data for x in cols], names=[x.name for x in cols], masked=masked, meta=meta['table']) for col, out_col in zip(cols, out.columns.values()): if masked and hasattr(col, 'mask'): out_col.data.mask = col.mask for attr in ('format', 'unit', 'description'): if hasattr(col, attr): setattr(out_col, attr, getattr(col, attr)) if hasattr(col, 'meta'): out_col.meta.update(col.meta) return out class MetaBaseReader(type): def __init__(cls, name, bases, dct): super().__init__(name, bases, dct) format = dct.get('_format_name') if format is None: return fast = dct.get('_fast') if fast is not None: FAST_CLASSES[format] = cls FORMAT_CLASSES[format] = cls io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', []) if dct.get('_io_registry_suffix'): func = functools.partial(connect.io_identify, dct['_io_registry_suffix']) connect.io_registry.register_identifier(io_formats[0], Table, func) for io_format in io_formats: func = functools.partial(connect.io_read, io_format) connect.io_registry.register_reader(io_format, Table, func) if dct.get('_io_registry_can_write', True): func = functools.partial(connect.io_write, io_format) connect.io_registry.register_writer(io_format, Table, func) def _is_number(x): with suppress(ValueError): x = float(x) return True return False def _apply_include_exclude_names(table, names, include_names, exclude_names): """ Apply names, include_names and exclude_names to a table. Parameters ---------- table : `~astropy.table.Table` Input table names : list List of names to override those in table (set to None to use existing names) include_names : list List of names to include in output exclude_names : list List of names to exclude from output (applied after ``include_names``) """ if names is not None: # Rename table column names to those passed by user # Temporarily rename with names that are not in `names` or `table.colnames`. # This ensures that rename succeeds regardless of existing names. 
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames)) for ii, colname in enumerate(table.colnames): table.rename_column(colname, xxxs + str(ii)) for ii, name in enumerate(names): table.rename_column(xxxs + str(ii), name) names = set(table.colnames) if include_names is not None: names.intersection_update(include_names) if exclude_names is not None: names.difference_update(exclude_names) if names != set(table.colnames): remove_names = set(table.colnames) - set(names) table.remove_columns(remove_names) class BaseReader(metaclass=MetaBaseReader): """Class providing methods to read and write an ASCII table using the specified header, data, inputter, and outputter instances. Typical usage is to instantiate a Reader() object and customize the ``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each of these is an object of the corresponding class. There is one method ``inconsistent_handler`` that can be used to customize the behavior of ``read()`` in the event that a data row doesn't match the header. The default behavior is to raise an InconsistentTableError. """ names = None include_names = None exclude_names = None strict_names = False guessing = False encoding = None header_class = BaseHeader data_class = BaseData inputter_class = BaseInputter outputter_class = TableOutputter def __init__(self): self.header = self.header_class() self.data = self.data_class() self.inputter = self.inputter_class() self.outputter = self.outputter_class() # Data and Header instances benefit from a little cross-coupling. Header may need to # know about number of data columns for auto-column name generation and Data may # need to know about header (e.g. for fixed-width tables where widths are spec'd in header. self.data.header = self.header self.header.data = self.data # Metadata, consisting of table-level meta and column-level meta. The latter # could include information about column type, description, formatting, etc, # depending on the table meta format. self.meta = OrderedDict(table=OrderedDict(), cols=OrderedDict()) def read(self, table): """Read the ``table`` and return the results in a format determined by the ``outputter`` attribute. The ``table`` parameter is any string or object that can be processed by the instance ``inputter``. For the base Inputter class ``table`` can be one of: * File name * File-like object * String (newline separated) with all header and data lines (must have at least 2 lines) * List of strings Parameters ---------- table : str, file_like, list Input table. Returns ------- table : `~astropy.table.Table` Output table """ # If ``table`` is a file then store the name in the ``data`` # attribute. The ``table`` is a "file" if it is a string # without the new line specific to the OS. with suppress(TypeError): # Strings only if os.linesep not in table + '': self.data.table_name = os.path.basename(table) # Get a list of the lines (rows) in the table self.lines = self.inputter.get_lines(table) # Set self.data.data_lines to a slice of lines contain the data rows self.data.get_data_lines(self.lines) # Extract table meta values (e.g. keywords, comments, etc). Updates self.meta. 
self.header.update_meta(self.lines, self.meta) # Get the table column definitions self.header.get_cols(self.lines) # Make sure columns are valid self.header.check_column_names(self.names, self.strict_names, self.guessing) self.cols = cols = self.header.cols self.data.splitter.cols = cols n_cols = len(cols) for i, str_vals in enumerate(self.data.get_str_vals()): if len(str_vals) != n_cols: str_vals = self.inconsistent_handler(str_vals, n_cols) # if str_vals is None, we skip this row if str_vals is None: continue # otherwise, we raise an error only if it is still inconsistent if len(str_vals) != n_cols: errmsg = ('Number of header columns ({}) inconsistent with' ' data columns ({}) at data line {}\n' 'Header values: {}\n' 'Data values: {}'.format( n_cols, len(str_vals), i, [x.name for x in cols], str_vals)) raise InconsistentTableError(errmsg) for j, col in enumerate(cols): col.str_vals.append(str_vals[j]) self.data.masks(cols) if hasattr(self.header, 'table_meta'): self.meta['table'].update(self.header.table_meta) table = self.outputter(cols, self.meta) self.cols = self.header.cols _apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names) return table def inconsistent_handler(self, str_vals, ncols): """ Adjust or skip data entries if a row is inconsistent with the header. The default implementation does no adjustment, and hence will always trigger an exception in read() any time the number of data entries does not match the header. Note that this will *not* be called if the row already matches the header. Parameters ---------- str_vals : list A list of value strings from the current row of the table. ncols : int The expected number of entries from the table header. Returns ------- str_vals : list List of strings to be parsed into data entries in the output table. If the length of this list does not match ``ncols``, an exception will be raised in read(). Can also be None, in which case the row will be skipped. """ # an empty list will always trigger an InconsistentTableError in read() return str_vals @property def comment_lines(self): """Return lines in the table that match header.comment regexp""" if not hasattr(self, 'lines'): raise ValueError('Table must be read prior to accessing the header comment lines') if self.header.comment: re_comment = re.compile(self.header.comment) comment_lines = [x for x in self.lines if re_comment.match(x)] else: comment_lines = [] return comment_lines def update_table_data(self, table): """ Update table columns in place if needed. This is a hook to allow updating the table columns after name filtering but before setting up to write the data. This is currently only used by ECSV and is otherwise just a pass-through. Parameters ---------- table : `astropy.table.Table` Input table for writing Returns ------- table : `astropy.table.Table` Output table for writing """ return table def write_header(self, lines, meta): self.header.write_comments(lines, meta) self.header.write(lines) def write(self, table): """ Write ``table`` as list of strings. Parameters ---------- table : `~astropy.table.Table` Input table data. Returns ------- lines : list List of strings corresponding to ASCII table """ # Check column names before altering self.header.cols = list(table.columns.values()) self.header.check_column_names(self.names, self.strict_names, False) # In-place update of columns in input ``table`` to reflect column # filtering. Note that ``table`` is guaranteed to be a copy of the # original user-supplied table. 
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names) # This is a hook to allow updating the table columns after name # filtering but before setting up to write the data. This is currently # only used by ECSV and is otherwise just a pass-through. table = self.update_table_data(table) # Now use altered columns new_cols = list(table.columns.values()) # link information about the columns to the writer object (i.e. self) self.header.cols = new_cols self.data.cols = new_cols self.header.table_meta = table.meta # Write header and data to lines list lines = [] self.write_header(lines, table.meta) self.data.write(lines) return lines class ContinuationLinesInputter(BaseInputter): """Inputter where lines ending in ``continuation_char`` are joined with the subsequent line. Example:: col1 col2 col3 1 \ 2 3 4 5 \ 6 """ continuation_char = '\\' replace_char = ' ' # If no_continue is not None then lines matching this regex are not subject # to line continuation. The initial use case here is Daophot. In this # case the continuation character is just replaced with replace_char. no_continue = None def process_lines(self, lines): re_no_continue = re.compile(self.no_continue) if self.no_continue else None parts = [] outlines = [] for line in lines: if re_no_continue and re_no_continue.match(line): line = line.replace(self.continuation_char, self.replace_char) if line.endswith(self.continuation_char): parts.append(line.replace(self.continuation_char, self.replace_char)) else: parts.append(line) outlines.append(''.join(parts)) parts = [] return outlines class WhitespaceSplitter(DefaultSplitter): def process_line(self, line): """Replace tab with space within ``line`` while respecting quoted substrings""" newline = [] in_quote = False lastchar = None for char in line: if char == self.quotechar and (self.escapechar is None or lastchar != self.escapechar): in_quote = not in_quote if char == '\t' and not in_quote: char = ' ' lastchar = char newline.append(char) return ''.join(newline) extra_reader_pars = ('Reader', 'Inputter', 'Outputter', 'delimiter', 'comment', 'quotechar', 'header_start', 'data_start', 'data_end', 'converters', 'encoding', 'data_Splitter', 'header_Splitter', 'names', 'include_names', 'exclude_names', 'strict_names', 'fill_values', 'fill_include_names', 'fill_exclude_names') def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs): """Initialize a table reader allowing for common customizations. See ui.get_reader() for param docs. This routine is for internal (package) use only and is useful because it depends only on the "core" module. """ from .fastbasic import FastBasic if issubclass(Reader, FastBasic): # Fast readers handle args separately if Inputter is not None: kwargs['Inputter'] = Inputter return Reader(**kwargs) if 'fast_reader' in kwargs: del kwargs['fast_reader'] # ignore fast_reader parameter for slow readers reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars) reader = Reader(**reader_kwargs) if Inputter is not None: reader.inputter = Inputter() if Outputter is not None: reader.outputter = Outputter() # Issue #855 suggested to set data_start to header_start + default_header_length # Thus, we need to retrieve this from the class definition before resetting these numbers. 
try: default_header_length = reader.data.start_line - reader.header.start_line except TypeError: # Start line could be None or an instancemethod default_header_length = None if 'delimiter' in kwargs: reader.header.splitter.delimiter = kwargs['delimiter'] reader.data.splitter.delimiter = kwargs['delimiter'] if 'comment' in kwargs: reader.header.comment = kwargs['comment'] reader.data.comment = kwargs['comment'] if 'quotechar' in kwargs: reader.header.splitter.quotechar = kwargs['quotechar'] reader.data.splitter.quotechar = kwargs['quotechar'] if 'data_start' in kwargs: reader.data.start_line = kwargs['data_start'] if 'data_end' in kwargs: reader.data.end_line = kwargs['data_end'] if 'header_start' in kwargs: if (reader.header.start_line is not None): reader.header.start_line = kwargs['header_start'] # For FixedWidthTwoLine the data_start is calculated relative to the position line. # However, position_line is given as absolute number and not relative to header_start. # So, ignore this Reader here. if (('data_start' not in kwargs) and (default_header_length is not None) and reader._format_name not in ['fixed_width_two_line', 'commented_header']): reader.data.start_line = reader.header.start_line + default_header_length elif kwargs['header_start'] is not None: # User trying to set a None header start to some value other than None raise ValueError('header_start cannot be modified for this Reader') if 'converters' in kwargs: reader.outputter.converters = kwargs['converters'] if 'data_Splitter' in kwargs: reader.data.splitter = kwargs['data_Splitter']() if 'header_Splitter' in kwargs: reader.header.splitter = kwargs['header_Splitter']() if 'names' in kwargs: reader.names = kwargs['names'] if 'include_names' in kwargs: reader.include_names = kwargs['include_names'] if 'exclude_names' in kwargs: reader.exclude_names = kwargs['exclude_names'] # Strict names is normally set only within the guessing process to # indicate that column names cannot be numeric or have certain # characters at the beginning or end. It gets used in # BaseHeader.check_column_names(). if 'strict_names' in kwargs: reader.strict_names = kwargs['strict_names'] if 'fill_values' in kwargs: reader.data.fill_values = kwargs['fill_values'] if 'fill_include_names' in kwargs: reader.data.fill_include_names = kwargs['fill_include_names'] if 'fill_exclude_names' in kwargs: reader.data.fill_exclude_names = kwargs['fill_exclude_names'] if 'encoding' in kwargs: reader.encoding = kwargs['encoding'] reader.inputter.encoding = kwargs['encoding'] return reader extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats', 'strip_whitespace', 'names', 'include_names', 'exclude_names', 'fill_values', 'fill_include_names', 'fill_exclude_names') def _get_writer(Writer, fast_writer, **kwargs): """Initialize a table writer allowing for common customizations. This routine is for internal (package) use only and is useful because it depends only on the "core" module. """ from .fastbasic import FastBasic # A value of None for fill_values imply getting the default string # representation of masked values (depending on the writer class), but the # machinery expects a list. The easiest here is to just pop the value off, # i.e. fill_values=None is the same as not providing it at all. 
if 'fill_values' in kwargs and kwargs['fill_values'] is None: del kwargs['fill_values'] if issubclass(Writer, FastBasic): # Fast writers handle args separately return Writer(**kwargs) elif fast_writer and 'fast_{0}'.format(Writer._format_name) in FAST_CLASSES: # Switch to fast writer kwargs['fast_writer'] = fast_writer return FAST_CLASSES['fast_{0}'.format(Writer._format_name)](**kwargs) writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars) writer = Writer(**writer_kwargs) if 'delimiter' in kwargs: writer.header.splitter.delimiter = kwargs['delimiter'] writer.data.splitter.delimiter = kwargs['delimiter'] if 'comment' in kwargs: writer.header.write_comment = kwargs['comment'] writer.data.write_comment = kwargs['comment'] if 'quotechar' in kwargs: writer.header.splitter.quotechar = kwargs['quotechar'] writer.data.splitter.quotechar = kwargs['quotechar'] if 'formats' in kwargs: writer.data.formats = kwargs['formats'] if 'strip_whitespace' in kwargs: if kwargs['strip_whitespace']: # Restore the default SplitterClass process_val method which strips # whitespace. This may have been changed in the Writer # initialization (e.g. Rdb and Tab) writer.data.splitter.process_val = operator.methodcaller('strip') else: writer.data.splitter.process_val = None if 'names' in kwargs: writer.header.names = kwargs['names'] if 'include_names' in kwargs: writer.include_names = kwargs['include_names'] if 'exclude_names' in kwargs: writer.exclude_names = kwargs['exclude_names'] if 'fill_values' in kwargs: # Prepend user-specified values to the class default. with suppress(TypeError, IndexError): # Test if it looks like (match, replace_string, optional_colname), # in which case make it a list kwargs['fill_values'][1] + '' kwargs['fill_values'] = [kwargs['fill_values']] writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values if 'fill_include_names' in kwargs: writer.data.fill_include_names = kwargs['fill_include_names'] if 'fill_exclude_names' in kwargs: writer.data.fill_exclude_names = kwargs['fill_exclude_names'] return writer
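# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the core machinery): how the converter and
# fill-value plumbing above is typically reached through the high-level read
# interface.  The helper name and the inline table values are made up for
# demonstration; ``convert_numpy`` is the function defined earlier in this
# module.
def _example_converters_and_fill_values():
    """Read a small table with an explicit converter and a fill value."""
    import numpy as np
    from astropy.io import ascii

    lines = ['a b c',
             '1 2.5 x',
             '2 N/A y']

    # ``converters`` feeds BaseOutputter._convert_vals (forcing column 'a'
    # to float64); ``fill_values`` feeds BaseData._set_fill_values and
    # _set_masks, masking 'N/A' in column 'b' and substituting '0' before
    # numeric conversion.
    return ascii.read(lines, format='basic',
                      converters={'a': [convert_numpy(np.float64)]},
                      fill_values=[('N/A', '0', 'b')])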
b1e9db7fb9664be150399df047d6c2aaa03693c8f05c4df0afb535217f127f3f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ An extensible ASCII table reader and writer. """ from .core import (InconsistentTableError, ParameterError, NoType, StrType, NumType, FloatType, IntType, AllType, Column, BaseInputter, ContinuationLinesInputter, BaseHeader, BaseData, BaseOutputter, TableOutputter, BaseReader, BaseSplitter, DefaultSplitter, WhitespaceSplitter, convert_numpy, masked ) from .basic import (Basic, BasicHeader, BasicData, Rdb, Csv, Tab, NoHeader, CommentedHeader) from .fastbasic import (FastBasic, FastCsv, FastTab, FastNoHeader, FastCommentedHeader, FastRdb) from .cds import Cds from .ecsv import Ecsv from .latex import Latex, AASTex, latexdicts from .html import HTML from .ipac import Ipac from .daophot import Daophot from .sextractor import SExtractor from .fixedwidth import (FixedWidth, FixedWidthNoHeader, FixedWidthTwoLine, FixedWidthSplitter, FixedWidthHeader, FixedWidthData) from .rst import RST from .ui import (set_guess, get_reader, read, get_writer, write, get_read_trace) from . import connect
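# ---------------------------------------------------------------------------
# Illustrative sketch (hedged): a minimal use of the interface re-exported
# above.  The helper name and the inline table content are made up for
# demonstration.
def _example_read():
    """Read a small whitespace-delimited table using format guessing."""
    lines = ['name flux', 'src1 1.2', 'src2 3.4']
    data = read(lines)           # format is guessed from the content
    trace = get_read_trace()     # records what the guesser tried, in order
    return data, trace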
f39a23a3596a9a0e68ccde02049952186e7c623b6c5bfa9713bb0490591df0e2
# Licensed under a 3-clause BSD style license """ :Author: Simon Gibbons ([email protected]) """ from .core import DefaultSplitter from .fixedwidth import (FixedWidth, FixedWidthData, FixedWidthHeader, FixedWidthTwoLineDataSplitter) class SimpleRSTHeader(FixedWidthHeader): position_line = 0 start_line = 1 splitter_class = DefaultSplitter position_char = '=' def get_fixedwidth_params(self, line): vals, starts, ends = super().get_fixedwidth_params(line) # The right hand column can be unbounded ends[-1] = None return vals, starts, ends class SimpleRSTData(FixedWidthData): start_line = 3 end_line = -1 splitter_class = FixedWidthTwoLineDataSplitter class RST(FixedWidth): """ Read or write a `reStructuredText simple format table <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#simple-tables>`_. Example:: ==== ===== ====== Col1 Col2 Col3 ==== ===== ====== 1 2.3 Hello 2 4.5 Worlds ==== ===== ====== Currently there is no support for reading tables which utilize continuation lines, or for ones which define column spans through the use of an additional line of dashes in the header. """ _format_name = 'rst' _description = 'reStructuredText simple table' data_class = SimpleRSTData header_class = SimpleRSTHeader def __init__(self): super().__init__(delimiter_pad=None, bookend=False) def write(self, lines): lines = super().write(lines) lines = [lines[1]] + lines + [lines[1]] return lines
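# ---------------------------------------------------------------------------
# Illustrative sketch (hedged, not part of the reader/writer classes above):
# writing a Table in the reStructuredText simple-table format.  The helper
# name and the column names/values are made up for demonstration.
def _example_rst_write():
    """Return a small table rendered as an RST simple table."""
    import io
    from astropy.table import Table
    from astropy.io import ascii

    t = Table({'Col1': [1, 2], 'Col2': [2.3, 4.5], 'Col3': ['Hello', 'Worlds']})
    buf = io.StringIO()
    # RST.write() above bookends the output with an extra '==== ...' line,
    # producing the layout shown in the RST class docstring.
    ascii.write(t, buf, format='rst')
    return buf.getvalue()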
bdccb92c05c65777d9a610c8a73bbc88a176bc4c7343db8577a0bdfa0bad0339
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. fixedwidth.py: Read or write a table with fixed width columns. :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ from . import core from .core import InconsistentTableError, DefaultSplitter from . import basic class FixedWidthSplitter(core.BaseSplitter): """ Split line based on fixed start and end positions for each ``col`` in ``self.cols``. This class requires that the Header class will have defined ``col.start`` and ``col.end`` for each column. The reference to the ``header.cols`` gets put in the splitter object by the base Reader.read() function just in time for splitting data lines by a ``data`` object. Note that the ``start`` and ``end`` positions are defined in the pythonic style so line[start:end] is the desired substring for a column. This splitter class does not have a hook for ``process_lines`` since that is generally not useful for fixed-width input. """ delimiter_pad = '' bookend = False delimiter = '|' def __call__(self, lines): for line in lines: vals = [line[x.start:x.end] for x in self.cols] if self.process_val: yield [self.process_val(x) for x in vals] else: yield vals def join(self, vals, widths): pad = self.delimiter_pad or '' delimiter = self.delimiter or '' padded_delim = pad + delimiter + pad if self.bookend: bookend_left = delimiter + pad bookend_right = pad + delimiter else: bookend_left = '' bookend_right = '' vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)] return bookend_left + padded_delim.join(vals) + bookend_right class FixedWidthHeaderSplitter(DefaultSplitter): '''Splitter class that splits on ``|``.''' delimiter = '|' class FixedWidthHeader(basic.BasicHeader): """ Fixed width table header reader. """ splitter_class = FixedWidthHeaderSplitter """ Splitter class for splitting data lines into columns """ position_line = None # secondary header line position """ row index of line that specifies position (default = 1) """ set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'") def get_line(self, lines, index): for i, line in enumerate(self.process_lines(lines)): if i == index: break else: # No header line matching raise InconsistentTableError('No header line found in table') return line def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines``. Based on the previously set Header attributes find or create the column names. Sets ``self.cols`` with the list of Columns. Parameters ---------- lines : list List of table lines """ # See "else" clause below for explanation of start_line and position_line start_line = core._get_line_index(self.start_line, self.process_lines(lines)) position_line = core._get_line_index(self.position_line, self.process_lines(lines)) # If start_line is none then there is no header line. Column positions are # determined from first data line and column names are either supplied by user # or auto-generated. 
if start_line is None: if position_line is not None: raise ValueError("Cannot set position_line without also setting header_start") data_lines = self.data.process_lines(lines) if not data_lines: raise InconsistentTableError( 'No data lines found so cannot autogenerate column names') vals, starts, ends = self.get_fixedwidth_params(data_lines[0]) self.names = [self.auto_format.format(i) for i in range(1, len(vals) + 1)] else: # This bit of code handles two cases: # start_line = <index> and position_line = None # Single header line where that line is used to determine both the # column positions and names. # start_line = <index> and position_line = <index2> # Two header lines where the first line defines the column names and # the second line defines the column positions if position_line is not None: # Define self.col_starts and self.col_ends so that the call to # get_fixedwidth_params below will use those to find the header # column names. Note that get_fixedwidth_params returns Python # slice col_ends but expects inclusive col_ends on input (for # more intuitive user interface). line = self.get_line(lines, position_line) if len(set(line) - set([self.splitter.delimiter, ' '])) != 1: raise InconsistentTableError('Position line should only contain delimiters and one other character, e.g. "--- ------- ---".') # The line above lies. It accepts white space as well. # We don't want to encourage using three different # characters, because that can cause ambiguities, but white # spaces are so common everywhere that practicality beats # purity here. charset = self.set_of_position_line_characters.union(set([self.splitter.delimiter, ' '])) if not set(line).issubset(charset): raise InconsistentTableError('Characters in position line must be part of {0}'.format(charset)) vals, self.col_starts, col_ends = self.get_fixedwidth_params(line) self.col_ends = [x - 1 if x is not None else None for x in col_ends] # Get the header column names and column positions line = self.get_line(lines, start_line) vals, starts, ends = self.get_fixedwidth_params(line) self.names = vals self._set_cols_from_names() # Set column start and end positions. for i, col in enumerate(self.cols): col.start = starts[i] col.end = ends[i] def get_fixedwidth_params(self, line): """ Split ``line`` on the delimiter and determine column values and column start and end positions. This might include null columns with zero length (e.g. for ``header row = "| col1 || col2 | col3 |"`` or ``header2_row = "----- ------- -----"``). The null columns are stripped out. Returns the values between delimiters and the corresponding start and end positions. Parameters ---------- line : str Input line Returns ------- vals : list List of values. starts : list List of starting indices. ends : list List of ending indices. """ # If column positions are already specified then just use those. # If neither column starts or ends are given, figure out positions # between delimiters. Otherwise, either the starts or the ends have # been given, so figure out whichever wasn't given. if self.col_starts is not None and self.col_ends is not None: starts = list(self.col_starts) # could be any iterable, e.g. 
np.array ends = [x + 1 if x is not None else None for x in self.col_ends] # user supplies inclusive endpoint if len(starts) != len(ends): raise ValueError('Fixed width col_starts and col_ends must have the same length') vals = [line[start:end].strip() for start, end in zip(starts, ends)] elif self.col_starts is None and self.col_ends is None: # There might be a cleaner way to do this but it works... vals = line.split(self.splitter.delimiter) starts = [0] ends = [] for val in vals: if val: ends.append(starts[-1] + len(val)) starts.append(ends[-1] + 1) else: starts[-1] += 1 starts = starts[:-1] vals = [x.strip() for x in vals if x] if len(vals) != len(starts) or len(vals) != len(ends): raise InconsistentTableError('Error parsing fixed width header') else: # exactly one of col_starts or col_ends is given... if self.col_starts is not None: starts = list(self.col_starts) ends = starts[1:] + [None] # Assume each col ends where the next starts else: # self.col_ends is not None ends = [x + 1 for x in self.col_ends] starts = [0] + ends[:-1] # Assume each col starts where the last ended vals = [line[start:end].strip() for start, end in zip(starts, ends)] return vals, starts, ends def write(self, lines): # Header line not written until data are formatted. Until then it is # not known how wide each column will be for fixed width. pass class FixedWidthData(basic.BasicData): """ Base table data reader. """ splitter_class = FixedWidthSplitter """ Splitter class for splitting data lines into columns """ def write(self, lines): vals_list = [] col_str_iters = self.str_vals() for vals in zip(*col_str_iters): vals_list.append(vals) for i, col in enumerate(self.cols): col.width = max([len(vals[i]) for vals in vals_list]) if self.header.start_line is not None: col.width = max(col.width, len(col.info.name)) widths = [col.width for col in self.cols] if self.header.start_line is not None: lines.append(self.splitter.join([col.info.name for col in self.cols], widths)) if self.header.position_line is not None: char = self.header.position_char if len(char) != 1: raise ValueError('Position_char="{}" must be a single ' 'character'.format(char)) vals = [char * col.width for col in self.cols] lines.append(self.splitter.join(vals, widths)) for vals in vals_list: lines.append(self.splitter.join(vals, widths)) return lines class FixedWidth(basic.Basic): """ Read or write a fixed width table with a single header line that defines column names and positions. Examples:: # Bar delimiter in header and data | Col1 | Col2 | Col3 | | 1.2 | hello there | 3 | | 2.4 | many words | 7 | # Bar delimiter in header only Col1 | Col2 | Col3 1.2 hello there 3 2.4 many words 7 # No delimiter with column positions specified as input Col1 Col2Col3 1.2hello there 3 2.4many words 7 See the :ref:`fixed_width_gallery` for specific usage examples. 
""" _format_name = 'fixed_width' _description = 'Fixed width' header_class = FixedWidthHeader data_class = FixedWidthData def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True): super().__init__() self.data.splitter.delimiter_pad = delimiter_pad self.data.splitter.bookend = bookend self.header.col_starts = col_starts self.header.col_ends = col_ends class FixedWidthNoHeaderHeader(FixedWidthHeader): '''Header reader for fixed with tables with no header line''' start_line = None class FixedWidthNoHeaderData(FixedWidthData): '''Data reader for fixed width tables with no header line''' start_line = 0 class FixedWidthNoHeader(FixedWidth): """ Read or write a fixed width table which has no header line. Column names are either input (``names`` keyword) or auto-generated. Column positions are determined either by input (``col_starts`` and ``col_stops`` keywords) or by splitting the first data line. In the latter case a ``delimiter`` is required to split the data line. Examples:: # Bar delimiter in header and data | 1.2 | hello there | 3 | | 2.4 | many words | 7 | # Compact table having no delimiter and column positions specified as input 1.2hello there3 2.4many words 7 This class is just a convenience wrapper around the ``FixedWidth`` reader but with ``header.start_line = None`` and ``data.start_line = 0``. See the :ref:`fixed_width_gallery` for specific usage examples. """ _format_name = 'fixed_width_no_header' _description = 'Fixed width with no header' header_class = FixedWidthNoHeaderHeader data_class = FixedWidthNoHeaderData def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True): super().__init__(col_starts, col_ends, delimiter_pad=delimiter_pad, bookend=bookend) class FixedWidthTwoLineHeader(FixedWidthHeader): '''Header reader for fixed width tables splitting on whitespace. For fixed width tables with several header lines, there is typically a white-space delimited format line, so splitting on white space is needed. ''' splitter_class = DefaultSplitter class FixedWidthTwoLineDataSplitter(FixedWidthSplitter): '''Splitter for fixed width tables splitting on ``' '``.''' delimiter = ' ' class FixedWidthTwoLineData(FixedWidthData): '''Data reader for fixed with tables with two header lines.''' splitter_class = FixedWidthTwoLineDataSplitter class FixedWidthTwoLine(FixedWidth): """ Read or write a fixed width table which has two header lines. The first header line defines the column names and the second implicitly defines the column positions. Examples:: # Typical case with column extent defined by ---- under column names. col1 col2 <== header_start = 0 ----- ------------ <== position_line = 1, position_char = "-" 1 bee flies <== data_start = 2 2 fish swims # Pretty-printed table +------+------------+ | Col1 | Col2 | +------+------------+ | 1.2 | "hello" | | 2.4 | there world| +------+------------+ See the :ref:`fixed_width_gallery` for specific usage examples. """ _format_name = 'fixed_width_two_line' _description = 'Fixed width with second header line' data_class = FixedWidthTwoLineData header_class = FixedWidthTwoLineHeader def __init__(self, position_line=1, position_char='-', delimiter_pad=None, bookend=False): super().__init__(delimiter_pad=delimiter_pad, bookend=bookend) self.header.position_line = position_line self.header.position_char = position_char self.data.start_line = position_line + 1
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. latex.py: Classes to read and write LaTeX tables :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import re from . import core latexdicts = {'AA': {'tabletype': 'table', 'header_start': r'\hline \hline', 'header_end': r'\hline', 'data_end': r'\hline'}, 'doublelines': {'tabletype': 'table', 'header_start': r'\hline \hline', 'header_end': r'\hline\hline', 'data_end': r'\hline\hline'}, 'template': {'tabletype': 'tabletype', 'caption': 'caption', 'tablealign': 'tablealign', 'col_align': 'col_align', 'preamble': 'preamble', 'header_start': 'header_start', 'header_end': 'header_end', 'data_start': 'data_start', 'data_end': 'data_end', 'tablefoot': 'tablefoot', 'units': {'col1': 'unit of col1', 'col2': 'unit of col2'}} } RE_COMMENT = re.compile(r'(?<!\\)%') # % character but not \% def add_dictval_to_list(adict, key, alist): ''' Add a value from a dictionary to a list Parameters ---------- adict : dictionary key : hashable alist : list List where value should be added ''' if key in adict: if isinstance(adict[key], str): alist.append(adict[key]) else: alist.extend(adict[key]) def find_latex_line(lines, latex): ''' Find the first line which matches a patters Parameters ---------- lines : list List of strings latex : str Search pattern Returns ------- line_num : int, None Line number. Returns None, if no match was found ''' re_string = re.compile(latex.replace('\\', '\\\\')) for i, line in enumerate(lines): if re_string.match(line): return i else: return None class LatexInputter(core.BaseInputter): def process_lines(self, lines): return [lin.strip() for lin in lines] class LatexSplitter(core.BaseSplitter): '''Split LaTeX table date. Default delimiter is `&`. ''' delimiter = '&' def __call__(self, lines): last_line = RE_COMMENT.split(lines[-1])[0].strip() if not last_line.endswith(r'\\'): lines[-1] = last_line + r'\\' return super().__call__(lines) def process_line(self, line): """Remove whitespace at the beginning or end of line. 
Also remove \\ at end of line""" line = RE_COMMENT.split(line)[0] line = line.strip() if line.endswith(r'\\'): line = line.rstrip(r'\\') else: raise core.InconsistentTableError(r'Lines in LaTeX table have to end with \\') return line def process_val(self, val): """Remove whitespace and {} at the beginning or end of value.""" val = val.strip() if val and (val[0] == '{') and (val[-1] == '}'): val = val[1:-1] return val def join(self, vals): '''Join values together and add a few extra spaces for readability''' delimiter = ' ' + self.delimiter + ' ' return delimiter.join(x.strip() for x in vals) + r' \\' class LatexHeader(core.BaseHeader): '''Class to read the header of Latex Tables''' header_start = r'\begin{tabular}' splitter_class = LatexSplitter def start_line(self, lines): line = find_latex_line(lines, self.header_start) if line is not None: return line + 1 else: return None def _get_units(self): units = {} col_units = [col.info.unit for col in self.cols] for name, unit in zip(self.colnames, col_units): if unit: try: units[name] = unit.to_string(format='latex_inline') except AttributeError: units[name] = unit return units def write(self, lines): if 'col_align' not in self.latex: self.latex['col_align'] = len(self.cols) * 'c' if 'tablealign' in self.latex: align = '[' + self.latex['tablealign'] + ']' else: align = '' if self.latex['tabletype'] is not None: lines.append(r'\begin{' + self.latex['tabletype'] + r'}' + align) add_dictval_to_list(self.latex, 'preamble', lines) if 'caption' in self.latex: lines.append(r'\caption{' + self.latex['caption'] + '}') lines.append(self.header_start + r'{' + self.latex['col_align'] + r'}') add_dictval_to_list(self.latex, 'header_start', lines) lines.append(self.splitter.join(self.colnames)) units = self._get_units() if 'units' in self.latex: units.update(self.latex['units']) if units: lines.append(self.splitter.join([units.get(name, ' ') for name in self.colnames])) add_dictval_to_list(self.latex, 'header_end', lines) class LatexData(core.BaseData): '''Class to read the data in LaTeX tables''' data_start = None data_end = r'\end{tabular}' splitter_class = LatexSplitter def start_line(self, lines): if self.data_start: return find_latex_line(lines, self.data_start) else: start = self.header.start_line(lines) if start is None: raise core.InconsistentTableError(r'Could not find table start') return start + 1 def end_line(self, lines): if self.data_end: return find_latex_line(lines, self.data_end) else: return None def write(self, lines): add_dictval_to_list(self.latex, 'data_start', lines) core.BaseData.write(self, lines) add_dictval_to_list(self.latex, 'data_end', lines) lines.append(self.data_end) add_dictval_to_list(self.latex, 'tablefoot', lines) if self.latex['tabletype'] is not None: lines.append(r'\end{' + self.latex['tabletype'] + '}') class Latex(core.BaseReader): r'''Write and read LaTeX tables. This class implements some LaTeX specific commands. Its main purpose is to write out a table in a form that LaTeX can compile. It is beyond the scope of this class to implement every possible LaTeX command, instead the focus is to generate a syntactically valid LaTeX tables. This class can also read simple LaTeX tables (one line per table row, no ``\multicolumn`` or similar constructs), specifically, it can read the tables that it writes. Reading a LaTeX table, the following keywords are accepted: **ignore_latex_commands** : Lines starting with these LaTeX commands will be treated as comments (i.e. ignored). 
When writing a LaTeX table, the some keywords can customize the format. Care has to be taken here, because python interprets ``\\`` in a string as an escape character. In order to pass this to the output either format your strings as raw strings with the ``r`` specifier or use a double ``\\\\``. Examples:: caption = r'My table \label{mytable}' caption = 'My table \\\\label{mytable}' **latexdict** : Dictionary of extra parameters for the LaTeX output * tabletype : used for first and last line of table. The default is ``\\begin{table}``. The following would generate a table, which spans the whole page in a two-column document:: ascii.write(data, sys.stdout, Writer = ascii.Latex, latexdict = {'tabletype': 'table*'}) If ``None``, the table environment will be dropped, keeping only the ``tabular`` environment. * tablealign : positioning of table in text. The default is not to specify a position preference in the text. If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``. * col_align : Alignment of columns If not present all columns will be centered. * caption : Table caption (string or list of strings) This will appear above the table as it is the standard in many scientific publications. If you prefer a caption below the table, just write the full LaTeX command as ``latexdict['tablefoot'] = r'\caption{My table}'`` * preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX Each one can be a string or a list of strings. These strings will be inserted into the table without any further processing. See the examples below. * units : dictionary of strings Keys in this dictionary should be names of columns. If present, a line in the LaTeX table directly below the column names is added, which contains the values of the dictionary. Example:: from astropy.io import ascii data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]} ascii.write(data, Writer=ascii.Latex, latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}}) If the column has no entry in the ``units`` dictionary, it defaults to the **unit** attribute of the column. If this attribute is not specified (i.e. it is None), the unit will be written as ``' '``. Run the following code to see where each element of the dictionary is inserted in the LaTeX table:: from astropy.io import ascii data = {'cola': [1,2], 'colb': [3,4]} ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template']) Some table styles are predefined in the dictionary ``ascii.latex.latexdicts``. The following generates in table in style preferred by A&A and some other journals:: ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA']) As an example, this generates a table, which spans all columns and is centered on the page:: ascii.write(data, Writer=ascii.Latex, col_align='|lr|', latexdict={'preamble': r'\begin{center}', 'tablefoot': r'\end{center}', 'tabletype': 'table*'}) **caption** : Set table caption Shorthand for:: latexdict['caption'] = caption **col_align** : Set the column alignment. If not present this will be auto-generated for centered columns. 
Shorthand for:: latexdict['col_align'] = col_align ''' _format_name = 'latex' _io_registry_format_aliases = ['latex'] _io_registry_suffix = '.tex' _description = 'LaTeX table' header_class = LatexHeader data_class = LatexData inputter_class = LatexInputter def __init__(self, ignore_latex_commands=['hline', 'vspace', 'tableline'], latexdict={}, caption='', col_align=None): super().__init__() self.latex = {} # The latex dict drives the format of the table and needs to be shared # with data and header self.header.latex = self.latex self.data.latex = self.latex self.latex['tabletype'] = 'table' self.latex.update(latexdict) if caption: self.latex['caption'] = caption if col_align: self.latex['col_align'] = col_align self.ignore_latex_commands = ignore_latex_commands self.header.comment = '%|' + '|'.join( [r'\\' + command for command in self.ignore_latex_commands]) self.data.comment = self.header.comment def write(self, table=None): self.header.start_line = None self.data.start_line = None return core.BaseReader.write(self, table=table) class AASTexHeaderSplitter(LatexSplitter): r'''Extract column names from a `deluxetable`_. This splitter expects the following LaTeX code **in a single line**: \tablehead{\colhead{col1} & ... & \colhead{coln}} ''' def __call__(self, lines): return super(LatexSplitter, self).__call__(lines) def process_line(self, line): """extract column names from tablehead """ line = line.split('%')[0] line = line.replace(r'\tablehead', '') line = line.strip() if (line[0] == '{') and (line[-1] == '}'): line = line[1:-1] else: raise core.InconsistentTableError(r'\tablehead is missing {}') return line.replace(r'\colhead', '') def join(self, vals): return ' & '.join([r'\colhead{' + str(x) + '}' for x in vals]) class AASTexHeader(LatexHeader): r'''In a `deluxetable <http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header keywords differ from standard LaTeX. This header is modified to take that into account. ''' header_start = r'\tablehead' splitter_class = AASTexHeaderSplitter def start_line(self, lines): return find_latex_line(lines, r'\tablehead') def write(self, lines): if 'col_align' not in self.latex: self.latex['col_align'] = len(self.cols) * 'c' if 'tablealign' in self.latex: align = '[' + self.latex['tablealign'] + ']' else: align = '' lines.append(r'\begin{' + self.latex['tabletype'] + r'}{' + self.latex['col_align'] + r'}' + align) add_dictval_to_list(self.latex, 'preamble', lines) if 'caption' in self.latex: lines.append(r'\tablecaption{' + self.latex['caption'] + '}') tablehead = ' & '.join([r'\colhead{' + name + '}' for name in self.colnames]) units = self._get_units() if 'units' in self.latex: units.update(self.latex['units']) if units: tablehead += r'\\ ' + self.splitter.join([units.get(name, ' ') for name in self.colnames]) lines.append(r'\tablehead{' + tablehead + '}') class AASTexData(LatexData): r'''In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata` ''' data_start = r'\startdata' data_end = r'\enddata' def start_line(self, lines): return find_latex_line(lines, self.data_start) + 1 def write(self, lines): lines.append(self.data_start) lines_length_initial = len(lines) core.BaseData.write(self, lines) # To remove extra space(s) and // appended which creates an extra new line # in the end. 
if len(lines) > lines_length_initial: # we compile separately because py2.6 doesn't have a flags keyword in re.sub re_final_line = re.compile(r'\s* \\ \\ \s* $', flags=re.VERBOSE) lines[-1] = re.sub(re_final_line, '', lines[-1]) lines.append(self.data_end) add_dictval_to_list(self.latex, 'tablefoot', lines) lines.append(r'\end{' + self.latex['tabletype'] + r'}') class AASTex(Latex): '''Write and read AASTeX tables. This class implements some AASTeX specific commands. AASTeX is used for the AAS (American Astronomical Society) publications like ApJ, ApJL and AJ. It derives from the ``Latex`` reader and accepts the same keywords. However, the keywords ``header_start``, ``header_end``, ``data_start`` and ``data_end`` in ``latexdict`` have no effect. ''' _format_name = 'aastex' _io_registry_format_aliases = ['aastex'] _io_registry_suffix = '' # AASTex inherits from Latex, so override this class attr _description = 'AASTeX deluxetable used for AAS journals' header_class = AASTexHeader data_class = AASTexData def __init__(self, **kwargs): super(AASTex, self).__init__(**kwargs) # check if tabletype was explicitly set by the user if not (('latexdict' in kwargs) and ('tabletype' in kwargs['latexdict'])): self.latex['tabletype'] = 'deluxetable'
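

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the classes above).  It writes a
# small made-up table with the Latex and AASTex writers through the public
# ``astropy.io.ascii`` interface; the caption and unit strings are arbitrary
# examples, not defaults.  Run it from a separate script, since this module
# uses relative imports.
if __name__ == '__main__':
    import sys
    from astropy.table import Table
    from astropy.io import ascii

    data = Table([['bike', 'car'], [75, 1200]], names=['name', 'mass'])

    # LaTeX tabular wrapped in a table environment, with a caption and a
    # units row supplied through ``latexdict``.
    ascii.write(data, sys.stdout, format='latex',
                latexdict={'caption': r'My table \label{tab:demo}',
                           'units': {'mass': 'kg'}})

    # AASTeX deluxetable variant; note that the header_start/header_end/
    # data_start/data_end entries of ``latexdict`` have no effect here.
    ascii.write(data, sys.stdout, format='aastex')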
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. ipac.py: Classes to read IPAC table format :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import re from collections import defaultdict, OrderedDict from textwrap import wrap from warnings import warn from . import core from . import fixedwidth from . import basic from ...utils.exceptions import AstropyUserWarning from ...table.pprint import get_auto_format_func class IpacFormatErrorDBMS(Exception): def __str__(self): return '{0}\nSee {1}'.format( super(Exception, self).__str__(), 'http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html') class IpacFormatError(Exception): def __str__(self): return '{0}\nSee {1}'.format( super(Exception, self).__str__(), 'http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html') class IpacHeaderSplitter(core.BaseSplitter): '''Splitter for Ipac Headers. This splitter is similar its parent when reading, but supports a fixed width format (as required for Ipac table headers) for writing. ''' process_line = None process_val = None delimiter = '|' delimiter_pad = '' skipinitialspace = False comment = r'\s*\\' write_comment = r'\\' col_starts = None col_ends = None def join(self, vals, widths): pad = self.delimiter_pad or '' delimiter = self.delimiter or '' padded_delim = pad + delimiter + pad bookend_left = delimiter + pad bookend_right = pad + delimiter vals = [' ' * (width - len(val)) + val for val, width in zip(vals, widths)] return bookend_left + padded_delim.join(vals) + bookend_right class IpacHeader(fixedwidth.FixedWidthHeader): """IPAC table header""" splitter_class = IpacHeaderSplitter # Defined ordered list of possible types. Ordering is needed to # distinguish between "d" (double) and "da" (date) as defined by # the IPAC standard for abbreviations. This gets used in get_col_type(). col_type_list = (('integer', core.IntType), ('long', core.IntType), ('double', core.FloatType), ('float', core.FloatType), ('real', core.FloatType), ('char', core.StrType), ('date', core.StrType)) definition = 'ignore' start_line = None def process_lines(self, lines): """Generator to yield IPAC header lines, i.e. those starting and ending with delimiter character (with trailing whitespace stripped)""" delim = self.splitter.delimiter for line in lines: line = line.rstrip() if line.startswith(delim) and line.endswith(delim): yield line.strip(delim) def update_meta(self, lines, meta): """ Extract table-level comments and keywords for IPAC table. See: http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html#kw """ def process_keyword_value(val): """ Take a string value and convert to float, int or str, and strip quotes as needed. """ val = val.strip() try: val = int(val) except Exception: try: val = float(val) except Exception: # Strip leading/trailing quote. The spec says that a matched pair # of quotes is required, but this code will allow a non-quoted value. for quote in ('"', "'"): if val.startswith(quote) and val.endswith(quote): val = val[1:-1] break return val table_meta = meta['table'] table_meta['comments'] = [] table_meta['keywords'] = OrderedDict() keywords = table_meta['keywords'] re_keyword = re.compile(r'\\' r'(?P<name> \w+)' r'\s* = (?P<value> .+) $', re.VERBOSE) for line in lines: # Keywords and comments start with "\". Once the first non-slash # line is seen then bail out. 
if not line.startswith('\\'): break m = re_keyword.match(line) if m: name = m.group('name') val = process_keyword_value(m.group('value')) # IPAC allows for continuation keywords, e.g. # \SQL = 'WHERE ' # \SQL = 'SELECT (25 column names follow in next row.)' if name in keywords and isinstance(val, str): prev_val = keywords[name]['value'] if isinstance(prev_val, str): val = prev_val + val keywords[name] = {'value': val} else: # Comment is required to start with "\ " if line.startswith('\\ '): val = line[2:].strip() if val: table_meta['comments'].append(val) def get_col_type(self, col): for (col_type_key, col_type) in self.col_type_list: if col_type_key.startswith(col.raw_type.lower()): return col_type else: raise ValueError('Unknown data type ""{}"" for column "{}"'.format( col.raw_type, col.name)) def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines``. Based on the previously set Header attributes find or create the column names. Sets ``self.cols`` with the list of Columns. Parameters ---------- lines : list List of table lines """ header_lines = self.process_lines(lines) # generator returning valid header lines header_vals = [vals for vals in self.splitter(header_lines)] if len(header_vals) == 0: raise ValueError('At least one header line beginning and ending with ' 'delimiter required') elif len(header_vals) > 4: raise ValueError('More than four header lines were found') # Generate column definitions cols = [] start = 1 for i, name in enumerate(header_vals[0]): col = core.Column(name=name.strip(' -')) col.start = start col.end = start + len(name) if len(header_vals) > 1: col.raw_type = header_vals[1][i].strip(' -') col.type = self.get_col_type(col) if len(header_vals) > 2: col.unit = header_vals[2][i].strip() or None # Can't strip dashes here if len(header_vals) > 3: # The IPAC null value corresponds to the io.ascii bad_value. # In this case there isn't a fill_value defined, so just put # in the minimal entry that is sure to convert properly to the # required type. # # Strip spaces but not dashes (not allowed in NULL row per # https://github.com/astropy/astropy/issues/361) null = header_vals[3][i].strip() fillval = '' if issubclass(col.type, core.StrType) else '0' self.data.fill_values.append((null, fillval, col.name)) start = col.end + 1 cols.append(col) # Correct column start/end based on definition if self.ipac_definition == 'right': col.start -= 1 elif self.ipac_definition == 'left': col.end += 1 self.names = [x.name for x in cols] self.cols = cols def str_vals(self): if self.DBMS: IpacFormatE = IpacFormatErrorDBMS else: IpacFormatE = IpacFormatError namelist = self.colnames if self.DBMS: countnamelist = defaultdict(int) for name in self.colnames: countnamelist[name.lower()] += 1 doublenames = [x for x in countnamelist if countnamelist[x] > 1] if doublenames != []: raise IpacFormatE('IPAC DBMS tables are not case sensitive. 
' 'This causes duplicate column names: {0}'.format(doublenames)) for name in namelist: m = re.match(r'\w+', name) if m.end() != len(name): raise IpacFormatE('{0} - Only alphanumeric characters and _ ' 'are allowed in column names.'.format(name)) if self.DBMS and not(name[0].isalpha() or (name[0] == '_')): raise IpacFormatE('Column name cannot start with numbers: {}'.format(name)) if self.DBMS: if name in ['x', 'y', 'z', 'X', 'Y', 'Z']: raise IpacFormatE('{0} - x, y, z, X, Y, Z are reserved names and ' 'cannot be used as column names.'.format(name)) if len(name) > 16: raise IpacFormatE( '{0} - Maximum length for column name is 16 characters'.format(name)) else: if len(name) > 40: raise IpacFormatE( '{0} - Maximum length for column name is 40 characters.'.format(name)) dtypelist = [] unitlist = [] nullist = [] for col in self.cols: col_dtype = col.info.dtype col_unit = col.info.unit col_format = col.info.format if col_dtype.kind in ['i', 'u']: dtypelist.append('long') elif col_dtype.kind == 'f': dtypelist.append('double') else: dtypelist.append('char') if col_unit is None: unitlist.append('') else: unitlist.append(str(col.info.unit)) # This may be incompatible with mixin columns null = col.fill_values[core.masked] try: auto_format_func = get_auto_format_func(col) format_func = col.info._format_funcs.get(col_format, auto_format_func) nullist.append((format_func(col_format, null)).strip()) except Exception: # It is possible that null and the column values have different # data types (e.g. number and null = 'null' (i.e. a string). # This could cause all kinds of exceptions, so a catch all # block is needed here nullist.append(str(null).strip()) return [namelist, dtypelist, unitlist, nullist] def write(self, lines, widths): '''Write header. The width of each column is determined in Ipac.write. Writing the header must be delayed until that time. This function is called from there, once the width information is available.''' for vals in self.str_vals(): lines.append(self.splitter.join(vals, widths)) return lines class IpacDataSplitter(fixedwidth.FixedWidthSplitter): delimiter = ' ' delimiter_pad = '' bookend = True class IpacData(fixedwidth.FixedWidthData): """IPAC table data reader""" comment = r'[|\\]' start_line = 0 splitter_class = IpacDataSplitter fill_values = [(core.masked, 'null')] def write(self, lines, widths, vals_list): """ IPAC writer, modified from FixedWidth writer """ for vals in vals_list: lines.append(self.splitter.join(vals, widths)) return lines class Ipac(basic.Basic): r"""Read or write an IPAC format table. See http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html:: \\name=value \\ Comment | column1 | column2 | column3 | column4 | column5 | | double | double | int | double | char | | unit | unit | unit | unit | unit | | null | null | null | null | null | 2.0978 29.09056 73765 2.06000 B8IVpMnHg Or:: |-----ra---|----dec---|---sao---|------v---|----sptype--------| 2.09708 29.09056 73765 2.06000 B8IVpMnHg The comments and keywords defined in the header are available via the output table ``meta`` attribute:: >>> import os >>> from astropy.io import ascii >>> filename = os.path.join(ascii.__path__[0], 'tests/t/ipac.dat') >>> data = ascii.read(filename) >>> print(data.meta['comments']) ['This is an example of a valid comment'] >>> for name, keyword in data.meta['keywords'].items(): ... print(name, keyword['value']) ... 
intval 1 floatval 2300.0 date Wed Sp 20 09:48:36 1995 key_continue IPAC keywords can continue across lines Note that there are different conventions for characters occuring below the position of the ``|`` symbol in IPAC tables. By default, any character below a ``|`` will be ignored (since this is the current standard), but if you need to read files that assume characters below the ``|`` symbols belong to the column before or after the ``|``, you can specify ``definition='left'`` or ``definition='right'`` respectively when reading the table (the default is ``definition='ignore'``). The following examples demonstrate the different conventions: * ``definition='ignore'``:: | ra | dec | | float | float | 1.2345 6.7890 * ``definition='left'``:: | ra | dec | | float | float | 1.2345 6.7890 * ``definition='right'``:: | ra | dec | | float | float | 1.2345 6.7890 IPAC tables can specify a null value in the header that is shown in place of missing or bad data. On writing, this value defaults to ``null``. To specify a different null value, use the ``fill_values`` option to replace masked values with a string or number of your choice as described in :ref:`io_ascii_write_parameters`:: >>> from astropy.io.ascii import masked >>> fill = [(masked, 'N/A', 'ra'), (masked, -999, 'sptype')] >>> ascii.write(data, format='ipac', fill_values=fill) \ This is an example of a valid comment ... | ra| dec| sai| v2| sptype| | double| double| long| double| char| | unit| unit| unit| unit| ergs| | N/A| null| null| null| -999| N/A 29.09056 null 2.06 -999 2345678901.0 3456789012.0 456789012 4567890123.0 567890123456789012 Parameters ---------- definition : str, optional Specify the convention for characters in the data table that occur directly below the pipe (``|``) symbol in the header column definition: * 'ignore' - Any character beneath a pipe symbol is ignored (default) * 'right' - Character is associated with the column to the right * 'left' - Character is associated with the column to the left DBMS : bool, optional If true, this verifies that written tables adhere (semantically) to the `IPAC/DBMS <http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/DBMSrestriction.html>`_ definition of IPAC tables. If 'False' it only checks for the (less strict) `IPAC <http://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html>`_ definition. """ _format_name = 'ipac' _io_registry_format_aliases = ['ipac'] _io_registry_can_write = True _description = 'IPAC format table' data_class = IpacData header_class = IpacHeader def __init__(self, definition='ignore', DBMS=False): super().__init__() # Usually the header is not defined in __init__, but here it need a keyword if definition in ['ignore', 'left', 'right']: self.header.ipac_definition = definition else: raise ValueError("definition should be one of ignore/left/right") self.header.DBMS = DBMS def write(self, table): """ Write ``table`` as list of strings. Parameters ---------- table : `~astropy.table.Table` Input table data Returns ------- lines : list List of strings corresponding to ASCII table """ # Set a default null value for all columns by adding at the end, which # is the position with the lowest priority. 
# We have to do it this late, because the fill_value # defined in the class can be overwritten by ui.write self.data.fill_values.append((core.masked, 'null')) # Check column names before altering self.header.cols = list(table.columns.values()) self.header.check_column_names(self.names, self.strict_names, self.guessing) core._apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names) # Now use altered columns new_cols = list(table.columns.values()) # link information about the columns to the writer object (i.e. self) self.header.cols = new_cols self.data.cols = new_cols # Write header and data to lines list lines = [] # Write meta information if 'comments' in table.meta: for comment in table.meta['comments']: if len(str(comment)) > 78: warn('Comment string > 78 characters was automatically wrapped.', AstropyUserWarning) for line in wrap(str(comment), 80, initial_indent='\\ ', subsequent_indent='\\ '): lines.append(line) if 'keywords' in table.meta: keydict = table.meta['keywords'] for keyword in keydict: try: val = keydict[keyword]['value'] lines.append('\\{0}={1!r}'.format(keyword.strip(), val)) # meta is not standardized: Catch some common Errors. except TypeError: warn("Table metadata keyword {0} has been skipped. " "IPAC metadata must be in the form {{'keywords':" "{{'keyword': {{'value': value}} }}".format(keyword), AstropyUserWarning) ignored_keys = [key for key in table.meta if key not in ('keywords', 'comments')] if any(ignored_keys): warn("Table metadata keyword(s) {0} were not written. " "IPAC metadata must be in the form {{'keywords':" "{{'keyword': {{'value': value}} }}".format(ignored_keys), AstropyUserWarning ) # Usually, this is done in data.write, but since the header is written # first, we need that here. self.data._set_fill_values(self.data.cols) # get header and data as strings to find width of each column for i, col in enumerate(table.columns.values()): col.headwidth = max([len(vals[i]) for vals in self.header.str_vals()]) # keep data_str_vals because they take some time to make data_str_vals = [] col_str_iters = self.data.str_vals() for vals in zip(*col_str_iters): data_str_vals.append(vals) for i, col in enumerate(table.columns.values()): # FIXME: In Python 3.4, use max([], default=0). # See: https://docs.python.org/3/library/functions.html#max if data_str_vals: col.width = max([len(vals[i]) for vals in data_str_vals]) else: col.width = 0 widths = [max(col.width, col.headwidth) for col in table.columns.values()] # then write table self.header.write(lines, widths) self.data.write(lines, widths, data_str_vals) return lines
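

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the classes above).  It writes and
# re-reads a small made-up table in IPAC format via ``astropy.io.ascii``.
# Note the shape expected for keyword metadata on write:
# ``table.meta['keywords'] = {name: {'value': value}}``.  Run it from a
# separate script, since this module uses relative imports.
if __name__ == '__main__':
    import io
    from astropy.table import Table
    from astropy.io import ascii

    t = Table([[2.09708, 38.12321], [29.09056, -88.1321]], names=['ra', 'dec'])
    t.meta['comments'] = ['An example comment']
    t.meta['keywords'] = {'origin': {'value': 'demo'}}

    buf = io.StringIO()
    ascii.write(t, buf, format='ipac')

    # Reading back recovers the comments and keywords in ``meta``.
    t2 = ascii.read(buf.getvalue(), format='ipac')
    assert t2.meta['keywords']['origin']['value'] == 'demo'
    assert t2.meta['comments'] == ['An example comment']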
# Licensed under a 3-clause BSD style license import os from distutils.extension import Extension ROOT = os.path.relpath(os.path.dirname(__file__)) def get_extensions(): sources = [os.path.join(ROOT, 'cparser.pyx'), os.path.join(ROOT, 'src', 'tokenizer.c')] ascii_ext = Extension( name="astropy.io.ascii.cparser", include_dirs=["numpy"], sources=sources) return [ascii_ext] def get_package_data(): # Installs the testing data files. Unable to get package_data # to deal with a directory hierarchy of files, so just explicitly list. return { 'astropy.io.ascii.tests': ['t/vizier/ReadMe', 't/vizier/table1.dat', 't/vizier/table5.dat', 't/apostrophe.rdb', 't/apostrophe.tab', 't/bad.txt', 't/bars_at_ends.txt', 't/cds.dat', 't/cds_malformed.dat', 't/cds/glob/ReadMe', 't/cds/glob/lmxbrefs.dat', 't/cds/multi/ReadMe', 't/cds/multi/lhs2065.dat', 't/cds/multi/lp944-20.dat', 't/cds2.dat', 't/commented_header.dat', 't/commented_header2.dat', 't/continuation.dat', 't/daophot.dat', 't/daophot2.dat', 't/daophot3.dat', 't/daophot4.dat', 't/sextractor.dat', 't/sextractor2.dat', 't/sextractor3.dat', 't/daophot.dat.gz', 't/fill_values.txt', 't/html.html', 't/html2.html', 't/ipac.dat', 't/ipac.dat.bz2', 't/ipac.dat.xz', 't/latex1.tex', 't/latex1.tex.gz', 't/latex2.tex', 't/latex3.tex', 't/nls1_stackinfo.dbout', 't/no_data_cds.dat', 't/no_data_daophot.dat', 't/no_data_sextractor.dat', 't/no_data_ipac.dat', 't/no_data_with_header.dat', 't/no_data_without_header.dat', 't/short.rdb', 't/short.rdb.bz2', 't/short.rdb.gz', 't/short.rdb.xz', 't/short.tab', 't/simple.txt', 't/simple2.txt', 't/simple3.txt', 't/simple4.txt', 't/simple5.txt', 't/space_delim_blank_lines.txt', 't/space_delim_no_header.dat', 't/space_delim_no_names.dat', 't/test4.dat', 't/test5.dat', 't/vots_spec.dat', 't/whitespace.dat', 't/simple_csv.csv', 't/simple_csv_missing.csv', 't/fixed_width_2_line.txt', 't/cds/description/ReadMe', 't/cds/description/table.dat', ] }
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible HTML table reader and writer. html.py: Classes to read and write HTML tables `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_ must be installed to read HTML tables. """ import warnings import numpy from . import core from ...table import Column from ...utils.xml import writer from copy import deepcopy class SoupString(str): """ Allows for strings to hold BeautifulSoup data. """ def __new__(cls, *args, **kwargs): return str.__new__(cls, *args, **kwargs) def __init__(self, val): self.soup = val class ListWriter: """ Allows for XMLWriter to write to a list instead of a file. """ def __init__(self, out): self.out = out def write(self, data): self.out.append(data) def identify_table(soup, htmldict, numtable): """ Checks whether the given BeautifulSoup tag is the table the user intends to process. """ if soup is None or soup.name != 'table': return False # Tag is not a <table> elif 'table_id' not in htmldict: return numtable == 1 table_id = htmldict['table_id'] if isinstance(table_id, str): return 'id' in soup.attrs and soup['id'] == table_id elif isinstance(table_id, int): return table_id == numtable # Return False if an invalid parameter is given return False class HTMLInputter(core.BaseInputter): """ Input lines of HTML in a valid form. This requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_ to be installed. """ def process_lines(self, lines): """ Convert the given input into a list of SoupString rows for further processing. """ try: from bs4 import BeautifulSoup except ImportError: raise core.OptionalTableImportError('BeautifulSoup must be ' 'installed to read HTML tables') if 'parser' not in self.html: with warnings.catch_warnings(): # Ignore bs4 parser warning #4550. warnings.filterwarnings('ignore', '.*no parser was explicitly specified.*') soup = BeautifulSoup('\n'.join(lines)) else: # use a custom backend parser soup = BeautifulSoup('\n'.join(lines), self.html['parser']) tables = soup.find_all('table') for i, possible_table in enumerate(tables): if identify_table(possible_table, self.html, i + 1): table = possible_table # Find the correct table break else: if isinstance(self.html['table_id'], int): err_descr = 'number {0}'.format(self.html['table_id']) else: err_descr = "id '{0}'".format(self.html['table_id']) raise core.InconsistentTableError( 'ERROR: HTML table {0} not found'.format(err_descr)) # Get all table rows soup_list = [SoupString(x) for x in table.find_all('tr')] return soup_list class HTMLSplitter(core.BaseSplitter): """ Split HTML table data. """ def __call__(self, lines): """ Return HTML data from lines as a generator. """ for line in lines: if not isinstance(line, SoupString): raise TypeError('HTML lines should be of type SoupString') soup = line.soup header_elements = soup.find_all('th') if header_elements: # Return multicolumns as tuples for HTMLHeader handling yield [(el.text.strip(), el['colspan']) if el.has_attr('colspan') else el.text.strip() for el in header_elements] data_elements = soup.find_all('td') if data_elements: yield [el.text.strip() for el in data_elements] if len(lines) == 0: raise core.InconsistentTableError('HTML tables must contain data ' 'in a <table> tag') class HTMLOutputter(core.TableOutputter): """ Output the HTML data as an ``astropy.table.Table`` object. This subclass allows for the final table to contain multidimensional columns (defined using the colspan attribute of <th>). 
""" default_converters = [core.convert_numpy(numpy.int), core.convert_numpy(numpy.float), core.convert_numpy(numpy.str), core.convert_numpy(numpy.unicode)] def __call__(self, cols, meta): """ Process the data in multidimensional columns. """ new_cols = [] col_num = 0 while col_num < len(cols): col = cols[col_num] if hasattr(col, 'colspan'): # Join elements of spanned columns together into list of tuples span_cols = cols[col_num:col_num + col.colspan] new_col = core.Column(col.name) new_col.str_vals = list(zip(*[x.str_vals for x in span_cols])) new_cols.append(new_col) col_num += col.colspan else: new_cols.append(col) col_num += 1 return super().__call__(new_cols, meta) class HTMLHeader(core.BaseHeader): splitter_class = HTMLSplitter def start_line(self, lines): """ Return the line number at which header data begins. """ for i, line in enumerate(lines): if not isinstance(line, SoupString): raise TypeError('HTML lines should be of type SoupString') soup = line.soup if soup.th is not None: return i return None def _set_cols_from_names(self): """ Set columns from header names, handling multicolumns appropriately. """ self.cols = [] new_names = [] for name in self.names: if isinstance(name, tuple): col = core.Column(name=name[0]) col.colspan = int(name[1]) self.cols.append(col) new_names.append(name[0]) for i in range(1, int(name[1])): # Add dummy columns self.cols.append(core.Column('')) new_names.append('') else: self.cols.append(core.Column(name=name)) new_names.append(name) self.names = new_names class HTMLData(core.BaseData): splitter_class = HTMLSplitter def start_line(self, lines): """ Return the line number at which table data begins. """ for i, line in enumerate(lines): if not isinstance(line, SoupString): raise TypeError('HTML lines should be of type SoupString') soup = line.soup if soup.td is not None: if soup.th is not None: raise core.InconsistentTableError('HTML tables cannot ' 'have headings and data in the same row') return i raise core.InconsistentTableError('No start line found for HTML data') def end_line(self, lines): """ Return the line number at which table data ends. """ last_index = -1 for i, line in enumerate(lines): if not isinstance(line, SoupString): raise TypeError('HTML lines should be of type SoupString') soup = line.soup if soup.td is not None: last_index = i if last_index == -1: return None return last_index + 1 class HTML(core.BaseReader): """Read and write HTML tables. In order to customize input and output, a dict of parameters may be passed to this class holding specific customizations. **htmldict** : Dictionary of parameters for HTML input/output. * css : Customized styling If present, this parameter will be included in a <style> tag and will define stylistic attributes of the output. * table_id : ID for the input table If a string, this defines the HTML id of the table to be processed. If an integer, this specifies the index of the input table in the available tables. Unless this parameter is given, the reader will use the first table found in the input file. * multicol : Use multi-dimensional columns for output The writer will output tuples as elements of multi-dimensional columns if this parameter is true, and if not then it will use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not present, this parameter will be true by default. * raw_html_cols : column name or list of names with raw HTML content This allows one to include raw HTML content in the column output, for instance to include link references in a table. 
This option requires that the bleach package be installed. Only whitelisted tags are allowed through for security reasons (see the raw_html_clean_kwargs arg). * raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning Raw HTML will be cleaned to prevent unsafe HTML from ending up in the table output. This is done by calling ``bleach.clean(data, **raw_html_clean_kwargs)``. For details on the available options (e.g. tag whitelist) see: http://bleach.readthedocs.io/en/latest/clean.html * parser : Specific HTML parsing library to use If specified, this specifies which HTML parsing library BeautifulSoup should use as a backend. The options to choose from are 'html.parser' (the standard library parser), 'lxml' (the recommended parser), 'xml' (lxml's XML parser), and 'html5lib'. html5lib is a highly lenient parser and therefore might work correctly for unusual input if a different parser fails. * jsfiles : list of js files to include when writing table. * cssfiles : list of css files to include when writing table. * js : js script to include in the body when writing table. * table_class : css class for the table """ _format_name = 'html' _io_registry_format_aliases = ['html'] _io_registry_suffix = '.html' _description = 'HTML table' header_class = HTMLHeader data_class = HTMLData inputter_class = HTMLInputter def __init__(self, htmldict={}): """ Initialize classes for HTML reading and writing. """ super().__init__() self.html = deepcopy(htmldict) if 'multicol' not in htmldict: self.html['multicol'] = True if 'table_id' not in htmldict: self.html['table_id'] = 1 self.inputter.html = self.html def read(self, table): """ Read the ``table`` in HTML format and return a resulting ``Table``. """ self.outputter = HTMLOutputter() return super().read(table) def write(self, table): """ Return data in ``table`` converted to HTML as a list of strings. """ cols = list(table.columns.values()) self.data.header.cols = cols if isinstance(self.data.fill_values, tuple): self.data.fill_values = [self.data.fill_values] self.data._set_fill_values(cols) lines = [] # Set HTML escaping to False for any column in the raw_html_cols input raw_html_cols = self.html.get('raw_html_cols', []) if isinstance(raw_html_cols, str): raw_html_cols = [raw_html_cols] # Allow for a single string as input cols_escaped = [col.info.name not in raw_html_cols for col in cols] # Kwargs that get passed on to bleach.clean() if that is available. 
raw_html_clean_kwargs = self.html.get('raw_html_clean_kwargs', {}) # Use XMLWriter to output HTML to lines w = writer.XMLWriter(ListWriter(lines)) with w.tag('html'): with w.tag('head'): # Declare encoding and set CSS style for table with w.tag('meta', attrib={'charset': 'utf-8'}): pass with w.tag('meta', attrib={'http-equiv': 'Content-type', 'content': 'text/html;charset=UTF-8'}): pass if 'css' in self.html: with w.tag('style'): w.data(self.html['css']) if 'cssfiles' in self.html: for filename in self.html['cssfiles']: with w.tag('link', rel="stylesheet", href=filename, type='text/css'): pass if 'jsfiles' in self.html: for filename in self.html['jsfiles']: with w.tag('script', src=filename): w.data('') # need this instead of pass to get <script></script> with w.tag('body'): if 'js' in self.html: with w.xml_cleaning_method('none'): with w.tag('script'): w.data(self.html['js']) if isinstance(self.html['table_id'], str): html_table_id = self.html['table_id'] else: html_table_id = None if 'table_class' in self.html: html_table_class = self.html['table_class'] attrib = {"class": html_table_class} else: attrib = {} with w.tag('table', id=html_table_id, attrib=attrib): with w.tag('thead'): with w.tag('tr'): for col in cols: if len(col.shape) > 1 and self.html['multicol']: # Set colspan attribute for multicolumns w.start('th', colspan=col.shape[1]) else: w.start('th') w.data(col.info.name.strip()) w.end(indent=False) col_str_iters = [] new_cols_escaped = [] for col, col_escaped in zip(cols, cols_escaped): if len(col.shape) > 1 and self.html['multicol']: span = col.shape[1] for i in range(span): # Split up multicolumns into separate columns new_col = Column([el[i] for el in col]) new_col_iter_str_vals = self.fill_values(col, new_col.info.iter_str_vals()) col_str_iters.append(new_col_iter_str_vals) new_cols_escaped.append(col_escaped) else: col_iter_str_vals = self.fill_values(col, col.info.iter_str_vals()) col_str_iters.append(col_iter_str_vals) new_cols_escaped.append(col_escaped) for row in zip(*col_str_iters): with w.tag('tr'): for el, col_escaped in zip(row, new_cols_escaped): # Potentially disable HTML escaping for column method = ('escape_xml' if col_escaped else 'bleach_clean') with w.xml_cleaning_method(method, **raw_html_clean_kwargs): w.start('td') w.data(el.strip()) w.end(indent=False) # Fixes XMLWriter's insertion of unwanted line breaks return [''.join(lines)] def fill_values(self, col, col_str_iters): """ Return an iterator of the values with replacements based on fill_values """ # check if the col is a masked column and has fill values is_masked_column = hasattr(col, 'mask') has_fill_values = hasattr(col, 'fill_values') for idx, col_str in enumerate(col_str_iters): if is_masked_column and has_fill_values: if col.mask[idx]: yield col.fill_values[core.masked] continue if has_fill_values: if col_str in col.fill_values: yield col.fill_values[col_str] continue yield col_str
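

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the classes above).  It writes a
# small made-up table with the HTML writer via ``astropy.io.ascii``; the
# ``table_id`` and ``css`` values are arbitrary examples.  Reading the HTML
# back requires BeautifulSoup (bs4) to be installed.  Run it from a separate
# script, since this module uses relative imports.
if __name__ == '__main__':
    import io
    from astropy.table import Table
    from astropy.io import ascii

    t = Table([[1, 2], ['x', 'y']], names=['a', 'b'])

    buf = io.StringIO()
    ascii.write(t, buf, format='html',
                htmldict={'table_id': 'demo',
                          'css': 'table, th, td {border: 1px solid black;}'})
    html_text = buf.getvalue()

    # Round trip; with bs4 installed this recovers the same column names.
    t2 = ascii.read(html_text, format='html')
    assert t2.colnames == ['a', 'b']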
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ sextractor.py: Classes to read SExtractor table format Built on daophot.py: :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import re from . import core class SExtractorHeader(core.BaseHeader): """Read the header from a file produced by SExtractor.""" comment = r'^\s*#\s*\S\D.*' # Find lines that don't have "# digit" def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines`` for a SExtractor header. The SExtractor header is specialized so that we just copy the entire BaseHeader get_cols routine and modify as needed. Parameters ---------- lines : list List of table lines """ # This assumes that the columns are listed in order, one per line with a # header comment string of the format: "# 1 ID short description [unit]" # However, some may be missing and must be inferred from skipped column numbers columns = {} # E.g. '# 1 ID identification number' (without units) or '# 2 MAGERR magnitude of error [mag]' # Updated along with issue #4603, for more robust parsing of unit re_name_def = re.compile(r"""^\s* \# \s* # possible whitespace around # (?P<colnumber> [0-9]+)\s+ # number of the column in table (?P<colname> [-\w]+) # name of the column (?:\s+(?P<coldescr> \w .+) # column description, match any character until... (?:(?<!(\]))$|(?=(?:(?<=\S)\s+\[.+\]))))? # ...until [non-space][space][unit] or [not-right-bracket][end] (?:\s*\[(?P<colunit>.+)\])?.* # match units in brackets """, re.VERBOSE) dataline = None for line in lines: if not line.startswith('#'): dataline = line # save for later to infer the actual number of columns break # End of header lines else: match = re_name_def.search(line) if match: colnumber = int(match.group('colnumber')) colname = match.group('colname') coldescr = match.group('coldescr') colunit = match.group('colunit') # If no units are given, colunit = None columns[colnumber] = (colname, coldescr, colunit) # Handle skipped column numbers colnumbers = sorted(columns) # Handle the case where the last column is array-like by append a pseudo column # If there are more data columns than the largest column number # then add a pseudo-column that will be dropped later. This allows # the array column logic below to work in all cases. if dataline is not None: n_data_cols = len(dataline.split()) else: n_data_cols = colnumbers[-1] # handles no data, where we have to rely on the last column number # sextractor column number start at 1. 
columns[n_data_cols + 1] = (None, None, None) colnumbers.append(n_data_cols + 1) if len(columns) > 1: # only fill in skipped columns when there is genuine column initially previous_column = 0 for n in colnumbers: if n != previous_column + 1: for c in range(previous_column+1, n): column_name = columns[previous_column][0]+"_{}".format(c-previous_column) column_descr = columns[previous_column][1] column_unit = columns[previous_column][2] columns[c] = (column_name, column_descr, column_unit) previous_column = n # Add the columns in order to self.names colnumbers = sorted(columns)[:-1] # drop the pseudo column self.names = [] for n in colnumbers: self.names.append(columns[n][0]) if not self.names: raise core.InconsistentTableError('No column names found in SExtractor header') self.cols = [] for n in colnumbers: col = core.Column(name=columns[n][0]) col.description = columns[n][1] col.unit = columns[n][2] self.cols.append(col) class SExtractorData(core.BaseData): start_line = 0 delimiter = ' ' comment = r'\s*#' class SExtractor(core.BaseReader): """Read a SExtractor file. SExtractor is a package for faint-galaxy photometry. Bertin & Arnouts 1996, A&A Supp. 317, 393. http://www.astromatic.net/software/sextractor Example:: # 1 NUMBER # 2 ALPHA_J2000 # 3 DELTA_J2000 # 4 FLUX_RADIUS # 7 MAG_AUTO [mag] # 8 X2_IMAGE Variance along x [pixel**2] # 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)] # 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)] 1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498 2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401 Note the skipped numbers since flux_radius has 3 columns. The three FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2 Also note that a post-ID description (e.g. "Variance along x") is optional and that units may be specified at the end of a line in brackets. """ _format_name = 'sextractor' _io_registry_can_write = False _description = 'SExtractor format table' header_class = SExtractorHeader data_class = SExtractorData inputter_class = core.ContinuationLinesInputter def read(self, table): """ Read input data (file-like object, filename, list of strings, or single string) into a Table and return the result. """ out = super().read(table) # remove the comments if 'comments' in out.meta: del out.meta['comments'] return out def write(self, table): raise NotImplementedError
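

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the classes above).  The header and
# data lines are adapted from the SExtractor docstring; note how the gap
# between column 4 and column 7 is filled with auto-generated
# FLUX_RADIUS_1 / FLUX_RADIUS_2 names.  Run it from a separate script, since
# this module uses relative imports.
if __name__ == '__main__':
    from astropy.io import ascii

    lines = ['# 1 NUMBER',
             '# 2 ALPHA_J2000',
             '# 3 DELTA_J2000',
             '# 4 FLUX_RADIUS',
             '# 7 MAG_AUTO [mag]',
             '1 32.23222 10.1211 0.8 1.2 1.4 18.1',
             '2 38.12321 -88.1321 2.2 2.4 3.1 17.0']
    dat = ascii.read(lines, format='sextractor')
    print(dat.colnames)
    # Expected: ['NUMBER', 'ALPHA_J2000', 'DELTA_J2000', 'FLUX_RADIUS',
    #            'FLUX_RADIUS_1', 'FLUX_RADIUS_2', 'MAG_AUTO']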
# Licensed under a 3-clause BSD style license - see LICENSE.rst """An extensible ASCII table reader and writer. ui.py: Provides the main user functions for reading and writing tables. :Copyright: Smithsonian Astrophysical Observatory (2010) :Author: Tom Aldcroft ([email protected]) """ import re import os import sys import copy import time import warnings import contextlib from io import StringIO import numpy as np from . import core from . import basic from . import cds from . import daophot from . import ecsv from . import sextractor from . import ipac from . import latex from . import html from . import fastbasic from . import cparser from . import fixedwidth from ...table import Table, vstack from ...utils.data import get_readable_fileobj from ...utils.exceptions import AstropyWarning, AstropyDeprecationWarning _read_trace = [] try: import yaml # pylint: disable=W0611 HAS_YAML = True except ImportError: HAS_YAML = False # Default setting for guess parameter in read() _GUESS = True def _probably_html(table, maxchars=100000): """ Determine if ``table`` probably contains HTML content. See PR #3693 and issue #3691 for context. """ if not isinstance(table, str): try: # If table is an iterable (list of strings) then take the first # maxchars of these. Make sure this is something with random # access to exclude a file-like object table[0] table[:1] size = 0 for i, line in enumerate(table): size += len(line) if size > maxchars: break table = os.linesep.join(table[:i+1]) except Exception: pass if isinstance(table, str): # Look for signs of an HTML table in the first maxchars characters table = table[:maxchars] # URL ending in .htm or .html if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table, re.IGNORECASE | re.VERBOSE): return True # Filename ending in .htm or .html which exists if re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and os.path.exists(table): return True # Table starts with HTML document type declaration if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE): return True # Look for <TABLE .. >, <TR .. >, <TD .. > tag openers. if all(re.search(r'< \s* {0} [^>]* >'.format(element), table, re.IGNORECASE | re.VERBOSE) for element in ('table', 'tr', 'td')): return True return False def set_guess(guess): """ Set the default value of the ``guess`` parameter for read() Parameters ---------- guess : bool New default ``guess`` value (e.g., True or False) """ global _GUESS _GUESS = guess def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs): """ Initialize a table reader allowing for common customizations. Most of the default behavior for various parameters is determined by the Reader class. Parameters ---------- Reader : `~astropy.io.ascii.BaseReader` Reader class (DEPRECATED). Default is :class:`Basic`. Inputter : `~astropy.io.ascii.BaseInputter` Inputter class Outputter : `~astropy.io.ascii.BaseOutputter` Outputter class delimiter : str Column delimiter string comment : str Regular expression defining a comment line in table quotechar : str One-character string to quote fields containing special characters header_start : int Line index for the header line not counting comment or blank lines. A line with only whitespace is considered blank. data_start : int Line index for the start of data not counting comment or blank lines. A line with only whitespace is considered blank. data_end : int Line index for the end of data not counting comment or blank lines. This value can be negative to count from the end. 
converters : dict Dictionary of converters. data_Splitter : `~astropy.io.ascii.BaseSplitter` Splitter class to split data columns. header_Splitter : `~astropy.io.ascii.BaseSplitter` Splitter class to split header columns. names : list List of names corresponding to each data column. include_names : list, optional List of names to include in output. exclude_names : list List of names to exclude from output (applied after ``include_names``). fill_values : dict Specification of fill values for bad or missing table values. fill_include_names : list List of names to include in fill_values. fill_exclude_names : list List of names to exclude from fill_values (applied after ``fill_include_names``). Returns ------- reader : `~astropy.io.ascii.BaseReader` subclass ASCII format reader instance """ # This function is a light wrapper around core._get_reader to provide a public interface # with a default Reader. if Reader is None: Reader = basic.Basic reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs) return reader def _get_format_class(format, ReaderWriter, label): if format is not None and ReaderWriter is not None: raise ValueError('Cannot supply both format and {0} keywords'.format(label)) if format is not None: if format in core.FORMAT_CLASSES: ReaderWriter = core.FORMAT_CLASSES[format] else: raise ValueError('ASCII format {0!r} not in allowed list {1}' .format(format, sorted(core.FORMAT_CLASSES))) return ReaderWriter def read(table, guess=None, **kwargs): """ Read the input ``table`` and return the table. Most of the default behavior for various parameters is determined by the Reader class. Parameters ---------- table : str, file-like, list, pathlib.Path object Input table as a file name, file-like object, list of strings, single newline-separated string or pathlib.Path object . guess : bool Try to guess the table format. Defaults to None. format : str, `~astropy.io.ascii.BaseReader` Input table format Inputter : `~astropy.io.ascii.BaseInputter` Inputter class Outputter : `~astropy.io.ascii.BaseOutputter` Outputter class delimiter : str Column delimiter string comment : str Regular expression defining a comment line in table quotechar : str One-character string to quote fields containing special characters header_start : int Line index for the header line not counting comment or blank lines. A line with only whitespace is considered blank. data_start : int Line index for the start of data not counting comment or blank lines. A line with only whitespace is considered blank. data_end : int Line index for the end of data not counting comment or blank lines. This value can be negative to count from the end. converters : dict Dictionary of converters data_Splitter : `~astropy.io.ascii.BaseSplitter` Splitter class to split data columns header_Splitter : `~astropy.io.ascii.BaseSplitter` Splitter class to split header columns names : list List of names corresponding to each data column include_names : list List of names to include in output. exclude_names : list List of names to exclude from output (applied after ``include_names``) fill_values : dict specification of fill values for bad or missing table values fill_include_names : list List of names to include in fill_values. 
fill_exclude_names : list List of names to exclude from fill_values (applied after ``fill_include_names``) fast_reader : bool or dict Whether to use the C engine, can also be a dict with options which defaults to `False`; parameters for options dict: use_fast_converter: bool enable faster but slightly imprecise floating point conversion method parallel: bool or int multiprocessing conversion using ``cpu_count()`` or ``'number'`` processes exponent_style: str One-character string defining the exponent or ``'Fortran'`` to auto-detect Fortran-style scientific notation like ``'3.14159D+00'`` (``'E'``, ``'D'``, ``'Q'``), all case-insensitive; default ``'E'``, all other imply ``use_fast_converter`` chunk_size : int If supplied with a value > 0 then read the table in chunks of approximately ``chunk_size`` bytes. Default is reading table in one pass. chunk_generator : bool If True and ``chunk_size > 0`` then return an iterator that returns a table for each chunk. The default is to return a single stacked table for all the chunks. Reader : `~astropy.io.ascii.BaseReader` Reader class (DEPRECATED) encoding: str Allow to specify encoding to read the file (default= ``None``). Returns ------- dat : `~astropy.table.Table` OR <generator> Output table """ del _read_trace[:] # Downstream readers might munge kwargs kwargs = copy.deepcopy(kwargs) # Convert fast_reader into a dict if not already and make sure 'enable' # key is available. fast_reader = kwargs.get('fast_reader', True) if isinstance(fast_reader, dict): fast_reader.setdefault('enable', 'force') else: fast_reader = {'enable': fast_reader} kwargs['fast_reader'] = fast_reader if fast_reader['enable'] and fast_reader.get('chunk_size'): return _read_in_chunks(table, **kwargs) if 'fill_values' not in kwargs: kwargs['fill_values'] = [('', '0')] # If an Outputter is supplied in kwargs that will take precedence. new_kwargs = {} if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading fast_reader['enable'] = False format = kwargs.get('format') new_kwargs.update(kwargs) # Get the Reader class based on possible format and Reader kwarg inputs. Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader') if Reader is not None: new_kwargs['Reader'] = Reader format = Reader._format_name # Remove format keyword if there, this is only allowed in read() not get_reader() if 'format' in new_kwargs: del new_kwargs['format'] if guess is None: guess = _GUESS if guess: # If ``table`` is probably an HTML file then tell guess function to add # the HTML reader at the top of the guess list. This is in response to # issue #3691 (and others) where libxml can segfault on a long non-HTML # file, thus prompting removal of the HTML reader from the default # guess list. new_kwargs['guess_html'] = _probably_html(table) # If `table` is a filename or readable file object then read in the # file now. This prevents problems in Python 3 with the file object # getting closed or left at the file end. See #3132, #3013, #3109, # #2001. If a `readme` arg was passed that implies CDS format, in # which case the original `table` as the data filename must be left # intact. 
if 'readme' not in new_kwargs: encoding = kwargs.get('encoding') try: with get_readable_fileobj(table, encoding=encoding) as fileobj: table = fileobj.read() except ValueError: # unreadable or invalid binary file raise except Exception: pass else: # Ensure that `table` has at least one \r or \n in it # so that the core.BaseInputter test of # ('\n' not in table and '\r' not in table) # will fail and so `table` cannot be interpreted there # as a filename. See #4160. if not re.search(r'[\r\n]', table): table = table + os.linesep # If the table got successfully read then look at the content # to see if is probably HTML, but only if it wasn't already # identified as HTML based on the filename. if not new_kwargs['guess_html']: new_kwargs['guess_html'] = _probably_html(table) # Get the table from guess in ``dat``. If ``dat`` comes back as None # then there was just one set of kwargs in the guess list so fall # through below to the non-guess way so that any problems result in a # more useful traceback. dat = _guess(table, new_kwargs, format, fast_reader) if dat is None: guess = False if not guess: reader = get_reader(**new_kwargs) if format is None: format = reader._format_name # Try the fast reader version of `format` first if applicable. Note that # if user specified a fast format (e.g. format='fast_basic') this test # will fail and the else-clause below will be used. if fast_reader['enable'] and 'fast_{0}'.format(format) in core.FAST_CLASSES: fast_kwargs = copy.deepcopy(new_kwargs) fast_kwargs['Reader'] = core.FAST_CLASSES['fast_{0}'.format(format)] fast_reader_rdr = get_reader(**fast_kwargs) try: dat = fast_reader_rdr.read(table) _read_trace.append({'kwargs': fast_kwargs, 'Reader': fast_reader_rdr.__class__, 'status': 'Success with fast reader (no guessing)'}) except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err: # special testing value to avoid falling back on the slow reader if fast_reader['enable'] == 'force': raise core.InconsistentTableError( 'fast reader {} exception: {}' .format(fast_reader_rdr.__class__, err)) # If the fast reader doesn't work, try the slow version dat = reader.read(table) _read_trace.append({'kwargs': new_kwargs, 'Reader': reader.__class__, 'status': 'Success with slow reader after failing' ' with fast (no guessing)'}) else: dat = reader.read(table) _read_trace.append({'kwargs': new_kwargs, 'Reader': reader.__class__, 'status': 'Success with specified Reader class ' '(no guessing)'}) return dat def _guess(table, read_kwargs, format, fast_reader): """ Try to read the table using various sets of keyword args. Start with the standard guess list and filter to make it unique and consistent with user-supplied read keyword args. Finally, if none of those work then try the original user-supplied keyword args. Parameters ---------- table : str, file-like, list Input table as a file name, file-like object, list of strings, or single newline-separated string. read_kwargs : dict Keyword arguments from user to be supplied to reader format : str Table format fast_reader : dict Options for the C engine fast reader. See read() function for details. Returns ------- dat : `~astropy.table.Table` or None Output table or None if only one guess format was available """ # Keep a trace of all failed guesses kwarg failed_kwargs = [] # Get an ordered list of read() keyword arg dicts that will be cycled # through in order to guess the format. 
full_list_guess = _get_guess_kwargs_list(read_kwargs) # If a fast version of the reader is available, try that before the slow version if fast_reader['enable'] and format is not None and 'fast_{0}'.format(format) in \ core.FAST_CLASSES: fast_kwargs = read_kwargs.copy() fast_kwargs['Reader'] = core.FAST_CLASSES['fast_{0}'.format(format)] full_list_guess = [fast_kwargs] + full_list_guess else: fast_kwargs = None # Filter the full guess list so that each entry is consistent with user kwarg inputs. # This also removes any duplicates from the list. filtered_guess_kwargs = [] fast_reader = read_kwargs.get('fast_reader') for guess_kwargs in full_list_guess: # If user specified slow reader then skip all fast readers if (fast_reader['enable'] is False and guess_kwargs['Reader'] in core.FAST_CLASSES.values()): continue # If user required a fast reader then skip all non-fast readers if (fast_reader['enable'] == 'force' and guess_kwargs['Reader'] not in core.FAST_CLASSES.values()): continue guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs? for key, val in read_kwargs.items(): # Do guess_kwargs.update(read_kwargs) except that if guess_args has # a conflicting key/val pair then skip this guess entirely. if key not in guess_kwargs: guess_kwargs[key] = val elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs: guess_kwargs_ok = False break if not guess_kwargs_ok: # User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g. # user supplies delimiter="|" but the guess wants to try delimiter=" ", # so skip the guess entirely. continue # Add the guess_kwargs to filtered list only if it is not already there. if guess_kwargs not in filtered_guess_kwargs: filtered_guess_kwargs.append(guess_kwargs) # If there are not at least two formats to guess then return no table # (None) to indicate that guessing did not occur. In that case the # non-guess read() will occur and any problems will result in a more useful # traceback. if len(filtered_guess_kwargs) <= 1: return None # Define whitelist of exceptions that are expected from readers when # processing invalid inputs. Note that OSError must fall through here # so one cannot simply catch any exception. guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError, AttributeError, core.OptionalTableImportError, core.ParameterError, cparser.CParserError) # Now cycle through each possible reader and associated keyword arguments. # Try to read the table using those args, and if an exception occurs then # keep track of the failed guess and move on. 
for guess_kwargs in filtered_guess_kwargs: t0 = time.time() try: # If guessing will try all Readers then use strict req'ts on column names if 'Reader' not in read_kwargs: guess_kwargs['strict_names'] = True reader = get_reader(**guess_kwargs) reader.guessing = True dat = reader.read(table) _read_trace.append({'kwargs': guess_kwargs, 'Reader': reader.__class__, 'status': 'Success (guessing)', 'dt': '{0:.3f} ms'.format((time.time() - t0) * 1000)}) return dat except guess_exception_classes as err: _read_trace.append({'kwargs': guess_kwargs, 'status': '{0}: {1}'.format(err.__class__.__name__, str(err)), 'dt': '{0:.3f} ms'.format((time.time() - t0) * 1000)}) failed_kwargs.append(guess_kwargs) else: # Failed all guesses, try the original read_kwargs without column requirements try: reader = get_reader(**read_kwargs) dat = reader.read(table) _read_trace.append({'kwargs': read_kwargs, 'Reader': reader.__class__, 'status': 'Success with original kwargs without strict_names ' '(guessing)'}) return dat except guess_exception_classes as err: _read_trace.append({'kwargs': guess_kwargs, 'status': '{0}: {1}'.format(err.__class__.__name__, str(err))}) failed_kwargs.append(read_kwargs) lines = ['\nERROR: Unable to guess table format with the guesses listed below:'] for kwargs in failed_kwargs: sorted_keys = sorted([x for x in sorted(kwargs) if x not in ('Reader', 'Outputter')]) reader_repr = repr(kwargs.get('Reader', basic.Basic)) keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)] kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys) keys_vals.extend(['{}: {!r}'.format(key, val) for key, val in kwargs_sorted]) lines.append(' '.join(keys_vals)) msg = ['', '************************************************************************', '** ERROR: Unable to guess table format with the guesses listed above. **', '** **', '** To figure out why the table did not read, use guess=False and **', '** appropriate arguments to read(). In particular specify the format **', '** and any known attributes like the delimiter. **', '************************************************************************'] lines.extend(msg) raise core.InconsistentTableError('\n'.join(lines)) def _get_guess_kwargs_list(read_kwargs): """ Get the full list of reader keyword argument dicts that are the basis for the format guessing process. The returned full list will then be: - Filtered to be consistent with user-supplied kwargs - Cleaned to have only unique entries - Used one by one to try reading the input table Note that the order of the guess list has been tuned over years of usage. Maintainers need to be very careful about any adjustments as the reasoning may not be immediately evident in all cases. This list can (and usually does) include duplicates. This is a result of the order tuning, but these duplicates get removed later. Parameters ---------- read_kwargs : dict User-supplied read keyword args Returns ------- guess_kwargs_list : list List of read format keyword arg dicts """ guess_kwargs_list = [] # If the table is probably HTML based on some heuristics then start with the # HTML reader. if read_kwargs.pop('guess_html', None): guess_kwargs_list.append(dict(Reader=html.HTML)) # Start with ECSV because an ECSV file will be read by Basic. This format # has very specific header requirements and fails out quickly. 
if HAS_YAML: guess_kwargs_list.append(dict(Reader=ecsv.Ecsv)) # Now try readers that accept the common arguments with the input arguments # (Unless there are not arguments - we try that in the next step anyway.) # FixedWidthTwoLine would also be read by Basic, so it needs to come first. if len(read_kwargs) > 0: for reader in [fixedwidth.FixedWidthTwoLine, fastbasic.FastBasic, basic.Basic]: first_kwargs = read_kwargs.copy() first_kwargs.update(dict(Reader=reader)) guess_kwargs_list.append(first_kwargs) # Then try a list of readers with default arguments guess_kwargs_list.extend([dict(Reader=fixedwidth.FixedWidthTwoLine), dict(Reader=fastbasic.FastBasic), dict(Reader=basic.Basic), dict(Reader=basic.Rdb), dict(Reader=fastbasic.FastTab), dict(Reader=basic.Tab), dict(Reader=cds.Cds), dict(Reader=daophot.Daophot), dict(Reader=sextractor.SExtractor), dict(Reader=ipac.Ipac), dict(Reader=latex.Latex), dict(Reader=latex.AASTex) ]) # Cycle through the basic-style readers using all combinations of delimiter # and quotechar. for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader, fastbasic.FastBasic, basic.Basic, fastbasic.FastNoHeader, basic.NoHeader): for delimiter in ("|", ",", " ", r"\s"): for quotechar in ('"', "'"): guess_kwargs_list.append(dict( Reader=Reader, delimiter=delimiter, quotechar=quotechar)) return guess_kwargs_list def _read_in_chunks(table, **kwargs): """ For fast_reader read the ``table`` in chunks and vstack to create a single table, OR return a generator of chunk tables. """ fast_reader = kwargs['fast_reader'] chunk_size = fast_reader.pop('chunk_size') chunk_generator = fast_reader.pop('chunk_generator', False) fast_reader['parallel'] = False # No parallel with chunks tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs) if chunk_generator: return tbl_chunks tbl0 = next(tbl_chunks) masked = tbl0.masked # Numpy won't allow resizing the original so make a copy here. out_cols = {col.name: col.data.copy() for col in tbl0.itercols()} str_kinds = ('S', 'U') for tbl in tbl_chunks: masked |= tbl.masked for name, col in tbl.columns.items(): # Concatenate current column data and new column data # If one of the inputs is string-like and the other is not, then # convert the non-string to a string. In a perfect world this would # be handled by numpy, but as of numpy 1.13 this results in a string # dtype that is too long (https://github.com/numpy/numpy/issues/10062). col1, col2 = out_cols[name], col.data if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds: col2 = np.array(col2.tolist(), dtype=col1.dtype.kind) elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds: col1 = np.array(col1.tolist(), dtype=col2.dtype.kind) # Choose either masked or normal concatenation concatenate = np.ma.concatenate if masked else np.concatenate out_cols[name] = concatenate([col1, col2]) # Make final table from numpy arrays, converting dict to list out_cols = [out_cols[name] for name in tbl0.colnames] out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta, copy=False) return out def _read_in_chunks_generator(table, chunk_size, **kwargs): """ For fast_reader read the ``table`` in chunks and return a generator of tables for each chunk. """ @contextlib.contextmanager def passthrough_fileobj(fileobj, encoding=None): """Stub for get_readable_fileobj, which does not seem to work in Py3 for input File-like object, see #6460""" yield fileobj # Set up to coerce `table` input into a readable file object by selecting # an appropriate function. 
# Convert table-as-string to a File object. Finding a newline implies # that the string is not a filename. if (isinstance(table, str) and ('\n' in table or '\r' in table)): table = StringIO(table) fileobj_context = passthrough_fileobj elif hasattr(table, 'read') and hasattr(table, 'seek'): fileobj_context = passthrough_fileobj else: # string filename or pathlib fileobj_context = get_readable_fileobj # Set up for iterating over chunks kwargs['fast_reader']['return_header_chars'] = True header = '' # Table header (up to start of data) prev_chunk_chars = '' # Chars from previous chunk after last newline first_chunk = True # True for the first chunk, False afterward with fileobj_context(table, encoding=kwargs.get('encoding')) as fh: while True: chunk = fh.read(chunk_size) # Got fewer chars than requested, must be end of file final_chunk = len(chunk) < chunk_size # If this is the last chunk and there is only whitespace then break if final_chunk and not re.search(r'\S', chunk): break # Step backwards from last character in chunk and find first newline for idx in range(len(chunk) - 1, -1, -1): if final_chunk or chunk[idx] == '\n': break else: raise ValueError('no newline found in chunk (chunk_size too small?)') # Stick on the header to the chunk part up to (and including) the # last newline. Make sure the small strings are concatenated first. complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1] prev_chunk_chars = chunk[idx + 1:] # Now read the chunk as a complete table tbl = read(complete_chunk, guess=False, **kwargs) # For the first chunk pop the meta key which contains the header # characters (everything up to the start of data) then fix kwargs # so it doesn't return that in meta any more. if first_chunk: header = tbl.meta.pop('__ascii_fast_reader_header_chars__') first_chunk = False yield tbl if final_chunk: break extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats', 'names', 'include_names', 'exclude_names', 'strip_whitespace') def get_writer(Writer=None, fast_writer=True, **kwargs): """ Initialize a table writer allowing for common customizations. Most of the default behavior for various parameters is determined by the Writer class. Parameters ---------- Writer : ``Writer`` Writer class (DEPRECATED). Defaults to :class:`Basic`. delimiter : str Column delimiter string comment : str String defining a comment line in table quotechar : str One-character string to quote fields containing special characters formats : dict Dictionary of format specifiers or formatting functions strip_whitespace : bool Strip surrounding whitespace from column values. names : list List of names corresponding to each data column include_names : list List of names to include in output. exclude_names : list List of names to exclude from output (applied after ``include_names``) fast_writer : bool Whether to use the fast Cython writer. Returns ------- writer : `~astropy.io.ascii.BaseReader` subclass ASCII format writer instance """ if Writer is None: Writer = basic.Basic if 'strip_whitespace' not in kwargs: kwargs['strip_whitespace'] = True writer = core._get_writer(Writer, fast_writer, **kwargs) # Handle the corner case of wanting to disable writing table comments for the # commented_header format. This format *requires* a string for `write_comment` # because that is used for the header column row, so it is not possible to # set the input `comment` to None. 
Without adding a new keyword or assuming # a default comment character, there is no other option but to tell user to # simply remove the meta['comments']. if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader)) and not isinstance(kwargs.get('comment', ''), str)): raise ValueError("for the commented_header writer you must supply a string\n" "value for the `comment` keyword. In order to disable writing\n" "table comments use `del t.meta['comments']` prior to writing.") return writer def write(table, output=None, format=None, Writer=None, fast_writer=True, *, overwrite=None, **kwargs): """Write the input ``table`` to ``filename``. Most of the default behavior for various parameters is determined by the Writer class. Parameters ---------- table : `~astropy.io.ascii.BaseReader`, array_like, str, file_like, list Input table as a Reader object, Numpy struct array, file name, file-like object, list of strings, or single newline-separated string. output : str, file_like Output [filename, file-like object]. Defaults to``sys.stdout``. format : str Output table format. Defaults to 'basic'. delimiter : str Column delimiter string comment : str String defining a comment line in table quotechar : str One-character string to quote fields containing special characters formats : dict Dictionary of format specifiers or formatting functions strip_whitespace : bool Strip surrounding whitespace from column values. names : list List of names corresponding to each data column include_names : list List of names to include in output. exclude_names : list List of names to exclude from output (applied after ``include_names``) fast_writer : bool Whether to use the fast Cython writer. overwrite : bool If ``overwrite=None`` (default) and the file exists, then a warning will be issued. In a future release this will instead generate an exception. If ``overwrite=False`` and the file exists, then an exception is raised. This parameter is ignored when the ``output`` arg is not a string (e.g., a file object). Writer : ``Writer`` Writer class (DEPRECATED). """ if isinstance(output, str): if os.path.lexists(output): if overwrite is None: warnings.warn( "{} already exists. " "Automatically overwriting ASCII files is deprecated. " "Use the argument 'overwrite=True' in the future.".format( output), AstropyDeprecationWarning) elif not overwrite: raise OSError("{} already exists".format(output)) if output is None: output = sys.stdout table_cls = table.__class__ if isinstance(table, Table) else Table table = table_cls(table, names=kwargs.get('names')) table0 = table[:0].copy() core._apply_include_exclude_names(table0, kwargs.get('names'), kwargs.get('include_names'), kwargs.get('exclude_names')) diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames) if diff_format_with_names: warnings.warn( 'The keys {} specified in the formats argument does not match a column name.' 
.format(diff_format_with_names), AstropyWarning) if table.has_mixin_columns: fast_writer = False Writer = _get_format_class(format, Writer, 'Writer') writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs) if writer._format_name in core.FAST_CLASSES: writer.write(table, output) return lines = writer.write(table) # Write the lines to output outstr = os.linesep.join(lines) if not hasattr(output, 'write'): output = open(output, 'w') output.write(outstr) output.write(os.linesep) output.close() else: output.write(outstr) output.write(os.linesep) def get_read_trace(): """ Return a traceback of the attempted read formats for the last call to `~astropy.io.ascii.read` where guessing was enabled. This is primarily for debugging. The return value is a list of dicts, where each dict includes the keyword args ``kwargs`` used in the read call and the returned ``status``. Returns ------- trace : list of dicts Ordered list of format guesses and status """ return copy.deepcopy(_read_trace)
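# A minimal usage sketch of the read()/write()/get_read_trace() interface
# defined above, assuming astropy.io.ascii is importable. The inline table
# data, the output file name 'example_out.txt', and the 'big_table.csv' file
# mentioned in the chunked-read comment are illustrative assumptions only.
if __name__ == '__main__':
    from astropy.io import ascii

    lines = ['a b c', '1 2.5 x', '4 5.5 y']

    # guess=True (the module default stored in _GUESS) cycles through the
    # guess list built by _get_guess_kwargs_list(); passing format='basic'
    # would skip guessing entirely.
    tbl = ascii.read(lines)
    ascii.write(tbl, 'example_out.txt', overwrite=True)

    # get_read_trace() reports which formats were attempted by the last
    # read() call where guessing was enabled.
    for entry in ascii.get_read_trace():
        print(entry['status'])

    # For very large files the fast C reader can stream the table in chunks
    # (see _read_in_chunks above), e.g.:
    #   ascii.read('big_table.csv', format='csv',
    #              fast_reader={'chunk_size': 100000000})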
8dbea67dd1d25ea46f76ee0efc826b405cc406e8c5f82366199e1c150af3f3e8
# Licensed under a 3-clause BSD style license - see LICENSE.rst import re import copy from collections import OrderedDict from . import core from ...table import Table from . import cparser from ...utils import set_locale class FastBasic(metaclass=core.MetaBaseReader): """ This class is intended to handle the same format addressed by the ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C code and is therefore much faster. Unlike the other ASCII readers and writers, this class is not very extensible and is restricted by optimization requirements. """ _format_name = 'fast_basic' _description = 'Basic table with custom delimiter using the fast C engine' _fast = True fill_extra_cols = False guessing = False strict_names = False def __init__(self, default_kwargs={}, **user_kwargs): # Make sure user does not set header_start to None for a reader # that expects a non-None value (i.e. a number >= 0). This mimics # what happens in the Basic reader. if (default_kwargs.get('header_start', 0) is not None and user_kwargs.get('header_start', 0) is None): raise ValueError('header_start cannot be set to None for this Reader') kwargs = default_kwargs.copy() kwargs.update(user_kwargs) # user kwargs take precedence over defaults delimiter = kwargs.pop('delimiter', ' ') self.delimiter = str(delimiter) if delimiter is not None else None self.write_comment = kwargs.get('comment', '# ') self.comment = kwargs.pop('comment', '#') if self.comment is not None: self.comment = str(self.comment) self.quotechar = str(kwargs.pop('quotechar', '"')) self.header_start = kwargs.pop('header_start', 0) # If data_start is not specified, start reading # data right after the header line data_start_default = user_kwargs.get('data_start', self.header_start + 1 if self.header_start is not None else 1) self.data_start = kwargs.pop('data_start', data_start_default) self.kwargs = kwargs self.strip_whitespace_lines = True self.strip_whitespace_fields = True def _read_header(self): # Use the tokenizer by default -- this method # can be overridden for specialized headers self.engine.read_header() def read(self, table): """ Read input data (file-like object, filename, list of strings, or single string) into a Table and return the result. 
""" if self.comment is not None and len(self.comment) != 1: raise core.ParameterError("The C reader does not support a comment regex") elif self.data_start is None: raise core.ParameterError("The C reader does not allow data_start to be None") elif self.header_start is not None and self.header_start < 0 and \ not isinstance(self, FastCommentedHeader): raise core.ParameterError("The C reader does not allow header_start to be " "negative except for commented-header files") elif self.data_start < 0: raise core.ParameterError("The C reader does not allow data_start to be negative") elif len(self.delimiter) != 1: raise core.ParameterError("The C reader only supports 1-char delimiters") elif len(self.quotechar) != 1: raise core.ParameterError("The C reader only supports a length-1 quote character") elif 'converters' in self.kwargs: raise core.ParameterError("The C reader does not support passing " "specialized converters") elif 'encoding' in self.kwargs: raise core.ParameterError("The C reader does not use the encoding parameter") elif 'Outputter' in self.kwargs: raise core.ParameterError("The C reader does not use the Outputter parameter") elif 'Inputter' in self.kwargs: raise core.ParameterError("The C reader does not use the Inputter parameter") elif 'data_Splitter' in self.kwargs or 'header_Splitter' in self.kwargs: raise core.ParameterError("The C reader does not use a Splitter class") self.strict_names = self.kwargs.pop('strict_names', False) fast_reader = self.kwargs.get('fast_reader', True) if not isinstance(fast_reader, dict): fast_reader = {} fast_reader.pop('enable', None) self.return_header_chars = fast_reader.pop('return_header_chars', False) self.kwargs['fast_reader'] = fast_reader self.engine = cparser.CParser(table, self.strip_whitespace_lines, self.strip_whitespace_fields, delimiter=self.delimiter, header_start=self.header_start, comment=self.comment, quotechar=self.quotechar, data_start=self.data_start, fill_extra_cols=self.fill_extra_cols, **self.kwargs) conversion_info = self._read_header() self.check_header() if conversion_info is not None: try_int, try_float, try_string = conversion_info else: try_int = {} try_float = {} try_string = {} with set_locale('C'): data, comments = self.engine.read(try_int, try_float, try_string) out = self.make_table(data, comments) if self.return_header_chars: out.meta['__ascii_fast_reader_header_chars__'] = self.engine.header_chars return out def make_table(self, data, comments): """Actually make the output table give the data and comments.""" meta = OrderedDict() if comments: meta['comments'] = comments return Table(data, names=list(self.engine.get_names()), meta=meta) def check_header(self): names = self.engine.get_header_names() or self.engine.get_names() if self.strict_names: # Impose strict requirements on column names (normally used in guessing) bads = [" ", ",", "|", "\t", "'", '"'] for name in names: if (core._is_number(name) or len(name) == 0 or name[0] in bads or name[-1] in bads): raise ValueError('Column name {0!r} does not meet strict name requirements' .format(name)) # When guessing require at least two columns if self.guessing and len(names) <= 1: raise ValueError('Table format guessing requires at least two columns, got {}' .format(names)) def write(self, table, output): """ Use a fast Cython method to write table data to output, where output is a filename or file-like object. 
""" self._write(table, output, {}) def _write(self, table, output, default_kwargs, header_output=True, output_types=False): write_kwargs = {'delimiter': self.delimiter, 'quotechar': self.quotechar, 'strip_whitespace': self.strip_whitespace_fields, 'comment': self.write_comment } write_kwargs.update(default_kwargs) # user kwargs take precedence over default kwargs write_kwargs.update(self.kwargs) writer = cparser.FastWriter(table, **write_kwargs) writer.write(output, header_output, output_types) class FastCsv(FastBasic): """ A faster version of the ordinary :class:`Csv` writer that uses the optimized C parsing engine. Note that this reader will append empty field values to the end of any row with not enough columns, while :class:`FastBasic` simply raises an error. """ _format_name = 'fast_csv' _description = 'Comma-separated values table using the fast C engine' _fast = True fill_extra_cols = True def __init__(self, **kwargs): super().__init__({'delimiter': ',', 'comment': None}, **kwargs) def write(self, table, output): """ Override the default write method of `FastBasic` to output masked values as empty fields. """ self._write(table, output, {'fill_values': [(core.masked, '')]}) class FastTab(FastBasic): """ A faster version of the ordinary :class:`Tab` reader that uses the optimized C parsing engine. """ _format_name = 'fast_tab' _description = 'Tab-separated values table using the fast C engine' _fast = True def __init__(self, **kwargs): super().__init__({'delimiter': '\t'}, **kwargs) self.strip_whitespace_lines = False self.strip_whitespace_fields = False class FastNoHeader(FastBasic): """ This class uses the fast C engine to read tables with no header line. If the names parameter is unspecified, the columns will be autonamed with "col{}". """ _format_name = 'fast_no_header' _description = 'Basic table with no headers using the fast C engine' _fast = True def __init__(self, **kwargs): super().__init__({'header_start': None, 'data_start': 0}, **kwargs) def write(self, table, output): """ Override the default writing behavior in `FastBasic` so that columns names are not included in output. """ self._write(table, output, {}, header_output=None) class FastCommentedHeader(FastBasic): """ A faster version of the :class:`CommentedHeader` reader, which looks for column names in a commented line. ``header_start`` denotes the index of the header line among all commented lines and is 0 by default. """ _format_name = 'fast_commented_header' _description = 'Columns name in a commented line using the fast C engine' _fast = True def __init__(self, **kwargs): super().__init__({}, **kwargs) # Mimic CommentedHeader's behavior in which data_start # is relative to header_start if unspecified; see #2692 if 'data_start' not in kwargs: self.data_start = 0 def make_table(self, data, comments): """ Actually make the output table give the data and comments. This is slightly different from the base FastBasic method in the way comments are handled. 
""" meta = OrderedDict() if comments: meta['comments'] = comments[1:] if not meta['comments']: del meta['comments'] return Table(data, names=list(self.engine.get_names()), meta=meta) def _read_header(self): tmp = self.engine.source commented_lines = [] for line in tmp.splitlines(): line = line.lstrip() if line and line[0] == self.comment: # line begins with a comment commented_lines.append(line[1:]) if len(commented_lines) == self.header_start + 1: break if len(commented_lines) <= self.header_start: raise cparser.CParserError('not enough commented lines') self.engine.setup_tokenizer([commented_lines[self.header_start]]) self.engine.header_start = 0 self.engine.read_header() self.engine.setup_tokenizer(tmp) def write(self, table, output): """ Override the default writing behavior in `FastBasic` so that column names are commented. """ self._write(table, output, {}, header_output='comment') class FastRdb(FastBasic): """ A faster version of the :class:`Rdb` reader. This format is similar to tab-delimited, but it also contains a header line after the column name line denoting the type of each column (N for numeric, S for string). """ _format_name = 'fast_rdb' _description = 'Tab-separated with a type definition header line' _fast = True def __init__(self, **kwargs): super().__init__({'delimiter': '\t', 'data_start': 2}, **kwargs) self.strip_whitespace_lines = False self.strip_whitespace_fields = False def _read_header(self): tmp = self.engine.source line1 = '' line2 = '' for line in tmp.splitlines(): # valid non-comment line if not line1 and line.strip() and line.lstrip()[0] != self.comment: line1 = line elif not line2 and line.strip() and line.lstrip()[0] != self.comment: line2 = line break else: # less than 2 lines in table raise ValueError('RDB header requires 2 lines') # tokenize the two header lines separately self.engine.setup_tokenizer([line2]) self.engine.header_start = 0 self.engine.read_header() types = self.engine.get_names() self.engine.setup_tokenizer([line1]) self.engine.set_names([]) self.engine.read_header() if len(self.engine.get_names()) != len(types): raise ValueError('RDB header mismatch between number of ' 'column names and column types') if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in types): raise ValueError('RDB type definitions do not all match ' '[num](N|S): {0}'.format(types)) try_int = {} try_float = {} try_string = {} for name, col_type in zip(self.engine.get_names(), types): if col_type[-1].lower() == 's': try_int[name] = 0 try_float[name] = 0 try_string[name] = 1 else: try_int[name] = 1 try_float[name] = 1 try_string[name] = 0 self.engine.setup_tokenizer(tmp) return (try_int, try_float, try_string) def write(self, table, output): """ Override the default writing behavior in `FastBasic` to output a line with column types after the column name line. """ self._write(table, output, {}, output_types=True)
cc5b56b8543be01270148597f5d50fc76c24ca69b9573539a0d2ab60d7b64686
"""A Collection of useful miscellaneous functions. misc.py: Collection of useful miscellaneous functions. :Author: Hannes Breytenbach ([email protected]) """ import collections import itertools import operator def first_true_index(iterable, pred=None, default=None): """find the first index position for the which the callable pred returns True""" if pred is None: func = operator.itemgetter(1) else: func = lambda x: pred(x[1]) ii = next(filter(func, enumerate(iterable)), default) # either index-item pair or default return ii[0] if ii else default def first_false_index(iterable, pred=None, default=None): """find the first index position for the which the callable pred returns False""" if pred is None: func = operator.not_ else: func = lambda x: not pred(x) return first_true_index(iterable, func, default) def sortmore(*args, **kw): """ Sorts any number of lists according to: optionally given item sorting key function(s) and/or a global sorting key function. Parameters ---------- One or more lists Keywords -------- globalkey : None revert to sorting by key function globalkey : callable Sort by evaluated value for all items in the lists (call signature of this function needs to be such that it accepts an argument tuple of items from each list. eg.: globalkey = lambda *l: sum(l) will order all the lists by the sum of the items from each list if key: None sorting done by value of first input list (in this case the objects in the first iterable need the comparison methods __lt__ etc...) if key: callable sorting done by value of key(item) for items in first iterable if key: tuple sorting done by value of (key(item_0), ..., key(item_n)) for items in the first n iterables (where n is the length of the key tuple) i.e. the first callable is the primary sorting criterion, and the rest act as tie-breakers. Returns ------- Sorted lists Examples -------- Capture sorting indeces: l = list('CharacterS') In [1]: sortmore( l, range(len(l)) ) Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'], [0, 9, 2, 4, 5, 7, 1, 3, 8, 6]) In [2]: sortmore( l, range(len(l)), key=str.lower ) Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'], [2, 4, 0, 5, 7, 1, 3, 8, 9, 6]) """ first = list(args[0]) if not len(first): return args globalkey = kw.get('globalkey') key = kw.get('key') if key is None: if globalkey: # if global sort function given and no local (secondary) key given, ==> no tiebreakers key = lambda x: 0 else: key = lambda x: x # if no global sort and no local sort keys given, sort by item values if globalkey is None: globalkey = lambda *x: 0 if not isinstance(globalkey, collections.Callable): raise ValueError('globalkey needs to be callable') if isinstance(key, collections.Callable): k = lambda x: (globalkey(*x), key(x[0])) elif isinstance(key, tuple): key = (k if k else lambda x: 0 for k in key) k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x)) else: raise KeyError( "kw arg 'key' should be None, callable, or a sequence of callables, not {}" .format(type(key))) res = sorted(list(zip(*args)), key=k) if 'order' in kw: if kw['order'].startswith(('descend', 'reverse')): res = reversed(res) return tuple(map(list, zip(*res))) def groupmore(func=None, *its): """Extends the itertools.groupby functionality to arbitrary number of iterators.""" if not func: func = lambda x: x its = sortmore(*its, key=func) nfunc = lambda x: func(x[0]) zipper = itertools.groupby(zip(*its), nfunc) unzipper = ((key, zip(*groups)) for key, groups in zipper) return unzipper
d9b27d0bea3259f0c8363a8c0fb0a56ea48f50244c972e5361e639169fe17137
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ An extensible ASCII table reader and writer. Classes to read DAOphot table format :Copyright: Smithsonian Astrophysical Observatory (2011) :Author: Tom Aldcroft ([email protected]) """ import re import numpy as np import itertools as itt from collections import defaultdict, OrderedDict from . import core from . import fixedwidth from .misc import first_true_index, first_false_index, groupmore class DaophotHeader(core.BaseHeader): """ Read the header from a file produced by the IRAF DAOphot routine. """ comment = r'\s*#K' # Regex for extracting the format strings re_format = re.compile(r'%-?(\d+)\.?\d?[sdfg]') re_header_keyword = re.compile(r'[#]K' r'\s+ (?P<name> \w+)' r'\s* = (?P<stuff> .+) $', re.VERBOSE) aperture_values = () def __init__(self): core.BaseHeader.__init__(self) def parse_col_defs(self, grouped_lines_dict): """ Parse a series of column definition lines like below. There may be several such blocks in a single file (where continuation characters have already been stripped). #N ID XCENTER YCENTER MAG MERR MSKY NITER #U ## pixels pixels magnitudes magnitudes counts ## #F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d """ line_ids = ('#N', '#U', '#F') coldef_dict = defaultdict(list) # Function to strip identifier lines stripper = lambda s: s[2:].strip(' \\') for defblock in zip(*map(grouped_lines_dict.get, line_ids)): for key, line in zip(line_ids, map(stripper, defblock)): coldef_dict[key].append(line.split()) # Save the original columns so we can use it later to reconstruct the # original header for writing if self.data.is_multiline: # Database contains multi-aperture data. # Autogen column names, units, formats from last row of column headers last_names, last_units, last_formats = list(zip(*map(coldef_dict.get, line_ids)))[-1] N_multiline = len(self.data.first_block) for i in np.arange(1, N_multiline + 1).astype('U2'): # extra column names eg. RAPERT2, SUM2 etc... extended_names = list(map(''.join, zip(last_names, itt.repeat(i)))) if i == '1': # Enumerate the names starting at 1 coldef_dict['#N'][-1] = extended_names else: coldef_dict['#N'].append(extended_names) coldef_dict['#U'].append(last_units) coldef_dict['#F'].append(last_formats) # Get column widths from column format specifiers get_col_width = lambda s: int(self.re_format.search(s).groups()[0]) col_widths = [[get_col_width(f) for f in formats] for formats in coldef_dict['#F']] # original data format might be shorter than 80 characters and filled with spaces row_widths = np.fromiter(map(sum, col_widths), int) row_short = Daophot.table_width - row_widths # fix last column widths for w, r in zip(col_widths, row_short): w[-1] += r self.col_widths = col_widths # merge the multi-line header data into single line data coldef_dict = dict((k, sum(v, [])) for (k, v) in coldef_dict.items()) return coldef_dict def update_meta(self, lines, meta): """ Extract table-level keywords for DAOphot table. These are indicated by a leading '#K ' prefix. 
""" table_meta = meta['table'] # self.lines = self.get_header_lines(lines) Nlines = len(self.lines) if Nlines > 0: # Group the header lines according to their line identifiers (#K, # #N, #U, #F or just # (spacer line)) function that grabs the line # identifier get_line_id = lambda s: s.split(None, 1)[0] # Group lines by the line identifier ('#N', '#U', '#F', '#K') and # capture line index gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines))) # Groups of lines and their indices grouped_lines, gix = zip(*groups) # Dict of line groups keyed by line identifiers grouped_lines_dict = dict(zip(gid, grouped_lines)) # Update the table_meta keywords if necessary if '#K' in grouped_lines_dict: keywords = OrderedDict(map(self.extract_keyword_line, grouped_lines_dict['#K'])) table_meta['keywords'] = keywords coldef_dict = self.parse_col_defs(grouped_lines_dict) line_ids = ('#N', '#U', '#F') for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)): meta['cols'][name] = {'unit': unit, 'format': fmt} self.meta = meta self.names = coldef_dict['#N'] def extract_keyword_line(self, line): """ Extract info from a header keyword line (#K) """ m = self.re_header_keyword.match(line) if m: vals = m.group('stuff').strip().rsplit(None, 2) keyword_dict = {'units': vals[-2], 'format': vals[-1], 'value': (vals[0] if len(vals) > 2 else "")} return m.group('name'), keyword_dict def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines`` for a DAOphot header. The DAOphot header is specialized so that we just copy the entire BaseHeader get_cols routine and modify as needed. Parameters ---------- lines : list List of table lines Returns ---------- col : list List of table Columns """ if not self.names: raise core.InconsistentTableError('No column names found in DAOphot header') # Create the list of io.ascii column objects self._set_cols_from_names() # Set unit and format as needed. coldefs = self.meta['cols'] for col in self.cols: unit, fmt = map(coldefs[col.name].get, ('unit', 'format')) if unit != '##': col.unit = unit if fmt != '##': col.format = fmt # Set column start and end positions. col_width = sum(self.col_widths, []) ends = np.cumsum(col_width) starts = ends - col_width for i, col in enumerate(self.cols): col.start, col.end = starts[i], ends[i] col.span = col.end - col.start if hasattr(col, 'format'): if any(x in col.format for x in 'fg'): col.type = core.FloatType elif 'd' in col.format: col.type = core.IntType elif 's' in col.format: col.type = core.StrType # INDEF is the missing value marker self.data.fill_values.append(('INDEF', '0')) class DaophotData(core.BaseData): splitter_class = fixedwidth.FixedWidthSplitter start_line = 0 comment = r'\s*#' def __init__(self): core.BaseData.__init__(self) self.is_multiline = False def get_data_lines(self, lines): # Special case for multiline daophot databases. 
Extract the aperture # values from the first multiline data block if self.is_multiline: # Grab the first column of the special block (aperture values) and # recreate the aperture description string aplist = next(zip(*map(str.split, self.first_block))) self.header.aperture_values = tuple(map(float, aplist)) # Set self.data.data_lines to a slice of lines contain the data rows core.BaseData.get_data_lines(self, lines) class DaophotInputter(core.ContinuationLinesInputter): continuation_char = '\\' multiline_char = '*' replace_char = ' ' re_multiline = re.compile(r'(#?)[^\\*#]*(\*?)(\\*) ?$') def search_multiline(self, lines, depth=150): """ Search lines for special continuation character to determine number of continued rows in a datablock. For efficiency, depth gives the upper limit of lines to search. """ # The list of apertures given in the #K APERTURES keyword may not be # complete!! This happens if the string description of the aperture # list is longer than the field width of the #K APERTURES field. In # this case we have to figure out how many apertures there are based on # the file structure. comment, special, cont = zip(*(self.re_multiline.search(l).groups() for l in lines[:depth])) # Find first non-comment line data_start = first_false_index(comment) # No data in lines[:depth]. This may be because there is no data in # the file, or because the header is really huge. If the latter, # increasing the search depth should help if data_start is None: return None, None, lines[:depth] header_lines = lines[:data_start] # Find first line ending on special row continuation character '*' # indexed relative to data_start first_special = first_true_index(special[data_start:depth]) if first_special is None: # no special lines return None, None, header_lines # last line ending on special '*', but not on line continue '/' last_special = first_false_index(special[data_start + first_special:depth]) # index relative to first_special # if first_special is None: #no end of special lines within search # depth! increase search depth return self.search_multiline( lines, # depth=2*depth ) # indexing now relative to line[0] markers = np.cumsum([data_start, first_special, last_special]) # multiline portion of first data block multiline_block = lines[markers[1]:markers[-1]] return markers, multiline_block, header_lines def process_lines(self, lines): markers, block, header = self.search_multiline(lines) self.data.is_multiline = markers is not None self.data.markers = markers self.data.first_block = block # set the header lines returned by the search as a attribute of the header self.data.header.lines = header if markers is not None: lines = lines[markers[0]:] continuation_char = self.continuation_char multiline_char = self.multiline_char replace_char = self.replace_char parts = [] outlines = [] for i, line in enumerate(lines): mo = self.re_multiline.search(line) if mo: comment, special, cont = mo.groups() if comment or cont: line = line.replace(continuation_char, replace_char) if special: line = line.replace(multiline_char, replace_char) if cont and not comment: parts.append(line) if not cont: parts.append(line) outlines.append(''.join(parts)) parts = [] else: raise ValueError('multiline re could not match line ' '{}: {}'.format(i, line)) return outlines class Daophot(core.BaseReader): """ Read a DAOphot file. 
Example:: #K MERGERAD = INDEF scaleunit %-23.7g #K IRAF = NOAO/IRAFV2.10EXPORT version %-23s #K USER = davis name %-23s #K HOST = tucana computer %-23s # #N ID XCENTER YCENTER MAG MERR MSKY NITER \\ #U ## pixels pixels magnitudes magnitudes counts ## \\ #F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d # #N SHARPNESS CHI PIER PERROR \\ #U ## ## ## perrors \\ #F %-23.3f %-12.3f %-6d %-13s # 14 138.538 INDEF 15.461 0.003 34.85955 4 \\ -0.032 0.802 0 No_error The keywords defined in the #K records are available via the output table ``meta`` attribute:: >>> import os >>> from astropy.io import ascii >>> filename = os.path.join(ascii.__path__[0], 'tests/t/daophot.dat') >>> data = ascii.read(filename) >>> for name, keyword in data.meta['keywords'].items(): ... print(name, keyword['value'], keyword['units'], keyword['format']) ... MERGERAD INDEF scaleunit %-23.7g IRAF NOAO/IRAFV2.10EXPORT version %-23s USER name %-23s ... The unit and formats are available in the output table columns:: >>> for colname in data.colnames: ... col = data[colname] ... print(colname, col.unit, col.format) ... ID None %-9d XCENTER pixels %-10.3f YCENTER pixels %-10.3f ... Any column values of INDEF are interpreted as a missing value and will be masked out in the resultant table. In case of multi-aperture daophot files containing repeated entries for the last row of fields, extra unique column names will be created by suffixing corresponding field names with numbers starting from 2 to N (where N is the total number of apertures). For example, first aperture radius will be RAPERT and corresponding magnitude will be MAG, second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2, third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3, and so on. """ _format_name = 'daophot' _io_registry_format_aliases = ['daophot'] _io_registry_can_write = False _description = 'IRAF DAOphot format table' header_class = DaophotHeader data_class = DaophotData inputter_class = DaophotInputter table_width = 80 def __init__(self): core.BaseReader.__init__(self) # The inputter needs to know about the data (see DaophotInputter.process_lines) self.inputter.data = self.data def write(self, table=None): raise NotImplementedError
bd2145e62e64021e7c5778baaa9b1f09036b80cd9881884c0df45cce7b19f712
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Define the Enhanced Character-Separated-Values (ECSV) which allows for reading and writing all the meta data associated with an astropy Table object. """ import re from collections import OrderedDict import contextlib from . import core, basic from ...table import meta, serialize from ...utils.data_info import serialize_context_as __doctest_requires__ = {'Ecsv': ['yaml']} ECSV_VERSION = '0.9' DELIMITERS = (' ', ',') class EcsvHeader(basic.BasicHeader): """Header class for which the column definition line starts with the comment character. See the :class:`CommentedHeader` class for an example. """ def process_lines(self, lines): """Return only non-blank lines that start with the comment regexp. For these lines strip out the matching characters and leading/trailing whitespace.""" re_comment = re.compile(self.comment) for line in lines: line = line.strip() if not line: continue match = re_comment.match(line) if match: out = line[match.end():] if out: yield out else: # Stop iterating on first failed match for a non-blank line return def write(self, lines): """ Write header information in the ECSV ASCII format. This format starts with a delimiter separated list of the column names in order to make this format readable by humans and simple csv-type readers. It then encodes the full table meta and column attributes and meta as YAML and pretty-prints this in the header. Finally the delimited column names are repeated again, for humans and readers that look for the *last* comment line as defining the column names. """ if self.splitter.delimiter not in DELIMITERS: raise ValueError('only space and comma are allowed for delimiter in ECSV format') for col in self.cols: if len(getattr(col, 'shape', ())) > 1: raise ValueError("ECSV format does not support multidimensional column '{0}'" .format(col.info.name)) # Now assemble the header dict that will be serialized by the YAML dumper header = {'cols': self.cols, 'schema': 'astropy-2.0'} if self.table_meta: header['meta'] = self.table_meta # Set the delimiter only for the non-default option(s) if self.splitter.delimiter != ' ': header['delimiter'] = self.splitter.delimiter header_yaml_lines = (['%ECSV {0}'.format(ECSV_VERSION), '---'] + meta.get_yaml_from_header(header)) lines.extend([self.write_comment + line for line in header_yaml_lines]) lines.append(self.splitter.join([x.info.name for x in self.cols])) def write_comments(self, lines, meta): """ Override the default write_comments to do nothing since this is handled in the custom write method. """ pass def update_meta(self, lines, meta): """ Override the default update_meta to do nothing. This process is done in get_cols() for this reader. """ pass def get_cols(self, lines): """ Initialize the header Column objects from the table ``lines``. Parameters ---------- lines : list List of table lines """ # Cache a copy of the original input lines before processing below raw_lines = lines # Extract non-blank comment (header) lines with comment character stripped lines = list(self.process_lines(lines)) # Validate that this is a ECSV file ecsv_header_re = r"""%ECSV [ ] (?P<major> \d+) \. (?P<minor> \d+) \.? (?P<bugfix> \d+)? $""" no_header_msg = ('ECSV header line like "# %ECSV <version>" not found as first line.' 
' This is required for a ECSV file.') if not lines: raise core.InconsistentTableError(no_header_msg) match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE) if not match: raise core.InconsistentTableError(no_header_msg) # ecsv_version could be constructed here, but it is not currently used. try: header = meta.get_header_from_yaml(lines) except meta.YamlParseError: raise core.InconsistentTableError('unable to parse yaml in meta header') if 'meta' in header: self.table_meta = header['meta'] if 'delimiter' in header: delimiter = header['delimiter'] if delimiter not in DELIMITERS: raise ValueError('only space and comma are allowed for delimiter in ECSV format') self.splitter.delimiter = delimiter self.data.splitter.delimiter = delimiter # Create the list of io.ascii column objects from `header` header_cols = OrderedDict((x['name'], x) for x in header['datatype']) self.names = [x['name'] for x in header['datatype']] # Read the first non-commented line of table and split to get the CSV # header column names. This is essentially what the Basic reader does. header_line = next(super().process_lines(raw_lines)) header_names = next(self.splitter([header_line])) # Check for consistency of the ECSV vs. CSV header column names if header_names != self.names: raise ValueError('column names from ECSV header {} do not ' 'match names from header line of CSV data {}' .format(self.names, header_names)) # BaseHeader method to create self.cols, which is a list of # io.ascii.core.Column objects (*not* Table Column objects). self._set_cols_from_names() # Transfer attributes from the column descriptor stored in the input # header YAML metadata to the new columns to create this table. for col in self.cols: for attr in ('description', 'format', 'unit', 'meta'): if attr in header_cols[col.name]: setattr(col, attr, header_cols[col.name][attr]) col.dtype = header_cols[col.name]['datatype'] # ECSV "string" means numpy dtype.kind == 'U' AKA str in Python 3 if col.dtype == 'string': col.dtype = 'str' if col.dtype.startswith('complex'): raise TypeError('ecsv reader does not support complex number types') class EcsvOutputter(core.TableOutputter): """ After reading the input lines and processing, convert the Reader columns and metadata to an astropy.table.Table object. This overrides the default converters to be an empty list because there is no "guessing" of the conversion function. """ default_converters = [] def __call__(self, cols, meta): # Convert to a Table with all plain Column subclass columns out = super().__call__(cols, meta) # If mixin columns exist (based on the special '__mixin_columns__' # key in the table ``meta``), then use that information to construct # appropriate mixin columns and remove the original data columns. # If no __mixin_columns__ exists then this function just passes back # the input table. out = serialize._construct_mixins_from_columns(out) return out class Ecsv(basic.Basic): """ Read a file which conforms to the ECSV (Enhanced Character Separated Values) format. This format allows for specification of key table and column meta-data, in particular the data type and unit. For details see: https://github.com/astropy/astropy-APEs/blob/master/APE6.rst. Examples -------- >>> from astropy.table import Table >>> ecsv_content = '''# %ECSV 0.9 ... # --- ... # datatype: ... # - {name: a, unit: m / s, datatype: int64, format: '%03d'} ... # - {name: b, unit: km, datatype: int64, description: This is column b} ... a b ... 001 2 ... 004 3 ... 
''' >>> Table.read(ecsv_content, format='ascii.ecsv') <Table length=2> a b m / s km int64 int64 ----- ----- 001 2 004 3 """ _format_name = 'ecsv' _description = 'Enhanced CSV' _io_registry_suffix = '.ecsv' header_class = EcsvHeader outputter_class = EcsvOutputter def update_table_data(self, table): """ Update table columns in place if mixin columns are present. This is a hook to allow updating the table columns after name filtering but before setting up to write the data. This is currently only used by ECSV and is otherwise just a pass-through. Parameters ---------- table : `astropy.table.Table` Input table for writing Returns ------- table : `astropy.table.Table` Output table for writing """ with serialize_context_as('ecsv'): out = serialize._represent_mixins_as_columns(table) return out
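

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the reader
# implementation above): round-trip a small table through the ECSV format.
# The column names, the km unit and the 'observer' meta key are made up for
# this example; PyYAML is required because the header is serialized as YAML.
def _ecsv_roundtrip_example():
    from io import StringIO

    from ...table import Table
    from ... import units as u

    t = Table([[1, 2], [3.0, 4.0]], names=['a', 'b'])
    t['b'].unit = u.km
    t.meta['observer'] = 'example'  # hypothetical metadata key

    out = StringIO()
    t.write(out, format='ascii.ecsv')   # EcsvHeader.write() emits the YAML header
    t2 = Table.read(out.getvalue(), format='ascii.ecsv')

    assert t2['b'].unit == u.km              # column attributes survive
    assert t2.meta['observer'] == 'example'  # so does the table meta
    return t2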
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file connects any readers/writers defined in io.misc to the # astropy.table.Table class from . import hdf5 hdf5.register_hdf5()
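

# Illustrative sketch (an addition for clarity, not part of the connector
# itself): after the call to ``register_hdf5()`` above, the 'hdf5' format
# should be resolvable for ``Table`` through the unified I/O registry.  The
# helper name below is made up for this example.
def _check_hdf5_registered():
    from .. import registry as io_registry
    from ...table import Table

    # get_reader() raises IORegistryError if the format were not registered.
    return io_registry.get_reader('hdf5', Table)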
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package contains functions for reading and writing HDF5 tables that are not meant to be used directly, but instead are available as readers/writers in `astropy.table`. See :ref:`table_io` for more details. """ import os import warnings import numpy as np # NOTE: Do not import anything from astropy.table here. # https://github.com/astropy/astropy/issues/6604 from ...utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n' META_KEY = '__table_column_meta__' __all__ = ['read_table_hdf5', 'write_table_hdf5'] def meta_path(path): return path + '.' + META_KEY def _find_all_structured_arrays(handle): """ Find all structured arrays in an HDF5 file """ import h5py structured_arrays = [] def append_structured_arrays(name, obj): if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V': structured_arrays.append(name) handle.visititems(append_structured_arrays) return structured_arrays def is_hdf5(origin, filepath, fileobj, *args, **kwargs): if fileobj is not None: loc = fileobj.tell() try: signature = fileobj.read(8) finally: fileobj.seek(loc) return signature == HDF5_SIGNATURE elif filepath is not None: return filepath.endswith(('.hdf5', '.h5')) try: import h5py except ImportError: return False else: return isinstance(args[0], (h5py.highlevel.File, h5py.highlevel.Group, h5py.highlevel.Dataset)) def read_table_hdf5(input, path=None): """ Read a Table object from an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one table is present in the HDF5 file or group, the first table is read in and a warning is displayed. Parameters ---------- input : str or :class:`h5py:File` or :class:`h5py:Group` or :class:`h5py:Dataset` If a string, the filename to read the table from. If an h5py object, either the file or the group object to read the table from. path : str The path from which to read the table inside the HDF5 file. This should be relative to the input file or group. """ try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") # This function is iterative, and only gets to writing the file when # the input is an hdf5 Group. Moreover, the input variable is changed in # place. # Here, we save its value to be used at the end when the conditions are # right. input_save = input if isinstance(input, (h5py.highlevel.File, h5py.highlevel.Group)): # If a path was specified, follow the path if path is not None: try: input = input[path] except (KeyError, ValueError): raise OSError("Path {0} does not exist".format(path)) # `input` is now either a group or a dataset. If it is a group, we # will search for all structured arrays inside the group, and if there # is one we can proceed otherwise an error is raised. If it is a # dataset, we just proceed with the reading. if isinstance(input, h5py.highlevel.Group): # Find all structured arrays in group arrays = _find_all_structured_arrays(input) if len(arrays) == 0: raise ValueError("no table found in HDF5 group {0}". 
format(path)) elif len(arrays) > 0: path = arrays[0] if path is None else path + '/' + arrays[0] warnings.warn("path= was not specified but multiple tables" " are present, reading in first available" " table (path={0})".format(path), AstropyUserWarning) return read_table_hdf5(input, path=path) elif not isinstance(input, h5py.highlevel.Dataset): # If a file object was passed, then we need to extract the filename # because h5py cannot properly read in file objects. if hasattr(input, 'read'): try: input = input.name except AttributeError: raise TypeError("h5py can only open regular files") # Open the file for reading, and recursively call read_table_hdf5 with # the file object and the path. f = h5py.File(input, 'r') try: return read_table_hdf5(f, path=path) finally: f.close() # If we are here, `input` should be a Dataset object, which we can now # convert to a Table. # Create a Table object from ...table import Table, meta, serialize table = Table(np.array(input)) # Read the meta-data from the file. For back-compatibility, we can read # the old file format where the serialized metadata were saved in the # attributes of the HDF5 dataset. # In the new format, instead, metadata are stored in a new dataset in the # same file. This is introduced in Astropy 3.0 old_version_meta = META_KEY in input.attrs new_version_meta = path is not None and meta_path(path) in input_save if old_version_meta or new_version_meta: if new_version_meta: header = meta.get_header_from_yaml( h.decode('utf-8') for h in input_save[meta_path(path)]) elif old_version_meta: header = meta.get_header_from_yaml( h.decode('utf-8') for h in input.attrs[META_KEY]) if 'meta' in list(header.keys()): table.meta = header['meta'] header_cols = dict((x['name'], x) for x in header['datatype']) for col in table.columns.values(): for attr in ('description', 'format', 'unit', 'meta'): if attr in header_cols[col.name]: setattr(col, attr, header_cols[col.name][attr]) # Construct new table with mixins, using tbl.meta['__serialized_columns__'] # as guidance. table = serialize._construct_mixins_from_columns(table) else: # Read the meta-data from the file table.meta.update(input.attrs) return table def _encode_mixins(tbl): """Encode a Table ``tbl`` that may have mixin columns to a Table with only astropy Columns + appropriate meta-data to allow subsequent decoding. """ from ...table import serialize from ...table.table import has_info_class from ... import units as u from ...utils.data_info import MixinInfo, serialize_context_as # If PyYAML is not available then check to see if there are any mixin cols # that *require* YAML serialization. HDF5 already has support for # Quantity, so if those are the only mixins the proceed without doing the # YAML bit, for backward compatibility (i.e. not requiring YAML to write # Quantity). try: import yaml except ImportError: for col in tbl.itercols(): if (has_info_class(col, MixinInfo) and col.__class__ is not u.Quantity): raise TypeError("cannot write type {} column '{}' " "to HDF5 without PyYAML installed." .format(col.__class__.__name__, col.info.name)) # Convert the table to one with no mixins, only Column objects. This adds # meta data which is extracted with meta.get_yaml_from_table. 
with serialize_context_as('hdf5'): encode_tbl = serialize._represent_mixins_as_columns(tbl) return encode_tbl def write_table_hdf5(table, output, path=None, compression=False, append=False, overwrite=False, serialize_meta=False, compatibility_mode=False): """ Write a Table object to an HDF5 file This requires `h5py <http://www.h5py.org/>`_ to be installed. Parameters ---------- table : `~astropy.table.Table` Data table that is to be written to file. output : str or :class:`h5py:File` or :class:`h5py:Group` If a string, the filename to write the table to. If an h5py object, either the file or the group object to write the table to. path : str The path to which to write the table inside the HDF5 file. This should be relative to the input file or group. compression : bool or str or int Whether to compress the table inside the HDF5 file. If set to `True`, ``'gzip'`` compression is used. If a string is specified, it should be one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is specified (in the range 0-9), ``'gzip'`` compression is used, and the integer denotes the compression level. append : bool Whether to append the table to an existing HDF5 file. overwrite : bool Whether to overwrite any existing file without warning. If ``append=True`` and ``overwrite=True`` then only the dataset will be replaced; the file/group will not be overwritten. """ from ...table import meta try: import h5py except ImportError: raise Exception("h5py is required to read and write HDF5 files") if path is None: raise ValueError("table path should be set via the path= argument") elif path.endswith('/'): raise ValueError("table path should end with table name, not /") if '/' in path: group, name = path.rsplit('/', 1) else: group, name = None, path if isinstance(output, (h5py.highlevel.File, h5py.highlevel.Group)): if group: try: output_group = output[group] except (KeyError, ValueError): output_group = output.create_group(group) else: output_group = output elif isinstance(output, str): if os.path.exists(output) and not append: if overwrite and not append: os.remove(output) else: raise OSError("File exists: {0}".format(output)) # Open the file for appending or writing f = h5py.File(output, 'a' if append else 'w') # Recursively call the write function try: return write_table_hdf5(table, f, path=path, compression=compression, append=append, overwrite=overwrite, serialize_meta=serialize_meta, compatibility_mode=compatibility_mode) finally: f.close() else: raise TypeError('output should be a string or an h5py File or ' 'Group object') # Check whether table already exists if name in output_group: if append and overwrite: # Delete only the dataset itself del output_group[name] else: raise OSError("Table {0} already exists".format(path)) # Encode any mixin columns as plain columns + appropriate metadata table = _encode_mixins(table) # Warn if information will be lost when serialize_meta=False. This is # hardcoded to the set difference between column info attributes and what # HDF5 can store natively (name, dtype) with no meta. if serialize_meta is False: for col in table.itercols(): for attr in ('unit', 'format', 'description', 'meta'): if getattr(col.info, attr, None) not in (None, {}): warnings.warn("table contains column(s) with defined 'unit', 'format'," " 'description', or 'meta' info attributes. 
These will" " be dropped since serialize_meta=False.", AstropyUserWarning) # Write the table to the file if compression: if compression is True: compression = 'gzip' dset = output_group.create_dataset(name, data=table.as_array(), compression=compression) else: dset = output_group.create_dataset(name, data=table.as_array()) if serialize_meta: header_yaml = meta.get_yaml_from_table(table) header_encoded = [h.encode('utf-8') for h in header_yaml] if compatibility_mode: warnings.warn("compatibility mode for writing is deprecated", AstropyDeprecationWarning) try: dset.attrs[META_KEY] = header_encoded except Exception as e: warnings.warn( "Attributes could not be written to the output HDF5 " "file: {0}".format(e)) else: output_group.create_dataset(meta_path(name), data=header_encoded) else: # Write the Table meta dict key:value pairs to the file as HDF5 # attributes. This works only for a limited set of scalar data types # like numbers, strings, etc., but not any complex types. This path # also ignores column meta like unit or format. for key in table.meta: val = table.meta[key] try: dset.attrs[key] = val except TypeError: warnings.warn("Attribute `{0}` of type {1} cannot be written to " "HDF5 files - skipping. (Consider specifying " "serialize_meta=True to write all meta data)".format(key, type(val)), AstropyUserWarning) def register_hdf5(): """ Register HDF5 with Unified I/O. """ from .. import registry as io_registry from ...table import Table io_registry.register_reader('hdf5', Table, read_table_hdf5) io_registry.register_writer('hdf5', Table, write_table_hdf5) io_registry.register_identifier('hdf5', Table, is_hdf5)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains functions for serializing core astropy objects via the YAML protocol. It provides functions `~astropy.io.misc.yaml.dump`, `~astropy.io.misc.yaml.load`, and `~astropy.io.misc.yaml.load_all` which call the corresponding functions in `PyYaml <http://pyyaml.org>`_ but use the `~astropy.io.misc.yaml.AstropyDumper` and `~astropy.io.misc.yaml.AstropyLoader` classes to define custom YAML tags for the following astropy classes: - `astropy.units.Unit` - `astropy.units.Quantity` - `astropy.time.Time` - `astropy.time.TimeDelta` - `astropy.coordinates.SkyCoord` - `astropy.coordinates.Angle` - `astropy.coordinates.Latitude` - `astropy.coordinates.Longitude` - `astropy.coordinates.EarthLocation` - `astropy.table.SerializedColumn` .. Note :: This module requires PyYaml version 3.12 or later. Example ======= :: >>> from astropy.io.misc import yaml >>> import astropy.units as u >>> from astropy.time import Time >>> from astropy.coordinates import EarthLocation >>> t = Time(2457389.0, format='mjd', ... location=EarthLocation(1000, 2000, 3000, unit=u.km)) >>> td = yaml.dump(t) >>> print(td) !astropy.time.Time format: mjd in_subfmt: '*' jd1: 4857390.0 jd2: -0.5 location: !astropy.coordinates.earth.EarthLocation ellipsoid: WGS84 x: !astropy.units.Quantity unit: &id001 !astropy.units.Unit {unit: km} value: 1000.0 y: !astropy.units.Quantity unit: *id001 value: 2000.0 z: !astropy.units.Quantity unit: *id001 value: 3000.0 out_subfmt: '*' precision: 3 scale: utc >>> ty = yaml.load(td) >>> ty <Time object: scale='utc' format='mjd' value=2457389.0> >>> ty.location # doctest: +FLOAT_CMP <EarthLocation (1000., 2000., 3000.) km> """ import base64 import numpy as np from ...time import Time, TimeDelta from ... import units as u from ... 
import coordinates as coords from ...utils import minversion from ...table import SerializedColumn try: import yaml except ImportError: raise ImportError('`import yaml` failed, PyYAML package is required for YAML') YAML_LT_3_12 = not minversion(yaml, '3.12') __all__ = ['AstropyLoader', 'AstropyDumper', 'load', 'load_all', 'dump'] def _unit_representer(dumper, obj): out = {'unit': str(obj.to_string())} return dumper.represent_mapping('!astropy.units.Unit', out) def _unit_constructor(loader, node): map = loader.construct_mapping(node) return u.Unit(map['unit']) def _serialized_column_representer(dumper, obj): out = dumper.represent_mapping('!astropy.table.SerializedColumn', obj) return out def _serialized_column_constructor(loader, node): map = loader.construct_mapping(node) return SerializedColumn(map) def _time_representer(dumper, obj): out = obj.info._represent_as_dict() return dumper.represent_mapping('!astropy.time.Time', out) def _time_constructor(loader, node): map = loader.construct_mapping(node) out = Time.info._construct_from_dict(map) return out def _timedelta_representer(dumper, obj): out = obj.info._represent_as_dict() return dumper.represent_mapping('!astropy.time.TimeDelta', out) def _timedelta_constructor(loader, node): map = loader.construct_mapping(node) out = TimeDelta.info._construct_from_dict(map) return out def _ndarray_representer(dumper, obj): if not (obj.flags['C_CONTIGUOUS'] or obj.flags['F_CONTIGUOUS']): obj = np.ascontiguousarray(obj) if np.isfortran(obj): obj = obj.T order = 'F' else: order = 'C' data_b64 = base64.b64encode(obj.tostring()) out = dict(buffer=data_b64, dtype=str(obj.dtype), shape=obj.shape, order=order) return dumper.represent_mapping('!numpy.ndarray', out) def _ndarray_constructor(loader, node): map = loader.construct_mapping(node) map['buffer'] = base64.b64decode(map['buffer']) return np.ndarray(**map) def _quantity_representer(tag): def representer(dumper, obj): out = obj.info._represent_as_dict() return dumper.represent_mapping(tag, out) return representer def _quantity_constructor(cls): def constructor(loader, node): map = loader.construct_mapping(node) return cls.info._construct_from_dict(map) return constructor def _skycoord_representer(dumper, obj): map = obj.info._represent_as_dict() out = dumper.represent_mapping('!astropy.coordinates.sky_coordinate.SkyCoord', map) return out def _skycoord_constructor(loader, node): map = loader.construct_mapping(node) out = coords.SkyCoord.info._construct_from_dict(map) return out # Straight from yaml's Representer def _complex_representer(self, data): if data.imag == 0.0: data = u'%r' % data.real elif data.real == 0.0: data = u'%rj' % data.imag elif data.imag > 0: data = u'%r+%rj' % (data.real, data.imag) else: data = u'%r%rj' % (data.real, data.imag) return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) def _complex_constructor(loader, node): map = loader.construct_scalar(node) return complex(map) class AstropyLoader(yaml.SafeLoader): """ Custom SafeLoader that constructs astropy core objects as well as Python tuple and unicode objects. This class is not directly instantiated by user code, but instead is used to maintain the available constructor functions that are called when parsing a YAML stream. See the `PyYaml documentation <http://pyyaml.org/wiki/PyYAMLDocumentation>`_ for details of the class signature. 
""" def _construct_python_tuple(self, node): return tuple(self.construct_sequence(node)) def _construct_python_unicode(self, node): return self.construct_scalar(node) class AstropyDumper(yaml.SafeDumper): """ Custom SafeDumper that represents astropy core objects as well as Python tuple and unicode objects. This class is not directly instantiated by user code, but instead is used to maintain the available representer functions that are called when generating a YAML stream from an object. See the `PyYaml documentation <http://pyyaml.org/wiki/PyYAMLDocumentation>`_ for details of the class signature. """ def _represent_tuple(self, data): return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) if YAML_LT_3_12: # pre-3.12, ignore-aliases could not deal with ndarray, so we backport # the more recent ignore_alises definition. def ignore_aliases(self, data): if data is None: return True if isinstance(data, tuple) and data == (): return True if isinstance(data, (str, bool, int, float)): return True AstropyDumper.add_representer(u.IrreducibleUnit, _unit_representer) AstropyDumper.add_representer(u.CompositeUnit, _unit_representer) AstropyDumper.add_multi_representer(u.Unit, _unit_representer) AstropyDumper.add_representer(tuple, AstropyDumper._represent_tuple) AstropyDumper.add_representer(np.ndarray, _ndarray_representer) AstropyDumper.add_representer(Time, _time_representer) AstropyDumper.add_representer(TimeDelta, _timedelta_representer) AstropyDumper.add_representer(coords.SkyCoord, _skycoord_representer) AstropyDumper.add_representer(SerializedColumn, _serialized_column_representer) # Numpy dtypes AstropyDumper.add_representer(np.bool_, yaml.representer.SafeRepresenter.represent_bool) for np_type in [np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]: AstropyDumper.add_representer(np_type, yaml.representer.SafeRepresenter.represent_int) for np_type in [np.float_, np.float16, np.float32, np.float64, np.longdouble]: AstropyDumper.add_representer(np_type, yaml.representer.SafeRepresenter.represent_float) for np_type in [np.complex_, complex, np.complex64, np.complex128]: AstropyDumper.add_representer(np_type, _complex_representer) AstropyLoader.add_constructor(u'tag:yaml.org,2002:python/complex', _complex_constructor) AstropyLoader.add_constructor('tag:yaml.org,2002:python/tuple', AstropyLoader._construct_python_tuple) AstropyLoader.add_constructor('tag:yaml.org,2002:python/unicode', AstropyLoader._construct_python_unicode) AstropyLoader.add_constructor('!astropy.units.Unit', _unit_constructor) AstropyLoader.add_constructor('!numpy.ndarray', _ndarray_constructor) AstropyLoader.add_constructor('!astropy.time.Time', _time_constructor) AstropyLoader.add_constructor('!astropy.time.TimeDelta', _timedelta_constructor) AstropyLoader.add_constructor('!astropy.coordinates.sky_coordinate.SkyCoord', _skycoord_constructor) AstropyLoader.add_constructor('!astropy.table.SerializedColumn', _serialized_column_constructor) for cls, tag in ((u.Quantity, '!astropy.units.Quantity'), (coords.Angle, '!astropy.coordinates.Angle'), (coords.Latitude, '!astropy.coordinates.Latitude'), (coords.Longitude, '!astropy.coordinates.Longitude'), (coords.EarthLocation, '!astropy.coordinates.earth.EarthLocation')): AstropyDumper.add_multi_representer(cls, _quantity_representer(tag)) AstropyLoader.add_constructor(tag, _quantity_constructor(cls)) def load(stream): """Parse the first YAML document in a stream using the AstropyLoader and produce the corresponding 
Python object. Parameters ---------- stream : str or file-like object YAML input Returns ------- obj : object Object corresponding to YAML document """ return yaml.load(stream, Loader=AstropyLoader) def load_all(stream): """Parse the all YAML documents in a stream using the AstropyLoader class and produce the corresponding Python object. Parameters ---------- stream : str or file-like object YAML input Returns ------- obj : object Object corresponding to YAML document """ return yaml.load_all(stream, Loader=AstropyLoader) def dump(data, stream=None, **kwargs): """Serialize a Python object into a YAML stream using the AstropyDumper class. If stream is None, return the produced string instead. Parameters ---------- data: object Object to serialize to YAML stream : file-like object, optional YAML output (if not supplied a string is returned) **kwargs Other keyword arguments that get passed to yaml.dump() Returns ------- out : str or None If no ``stream`` is supplied then YAML output is returned as str """ kwargs['Dumper'] = AstropyDumper return yaml.dump(data, stream=stream, **kwargs)
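

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the module
# API): the custom Dumper/Loader pair round-trips astropy objects such as
# Quantity, as well as plain numpy arrays, via the tags registered above.
def _yaml_roundtrip_example():
    q = 3.0 * u.km / u.s
    q2 = load(dump(q))
    assert q2.unit == u.km / u.s and q2.value == 3.0

    arr = np.arange(6).reshape(2, 3)
    arr2 = load(dump(arr))  # stored as a base64-encoded buffer (!numpy.ndarray)
    assert arr2.dtype == arr.dtype and (arr2 == arr).all()

    return q2, arr2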
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple input/output related functionality that is not part of a larger framework or standard. """ import warnings from ...utils.exceptions import AstropyDeprecationWarning, NoValue __all__ = ['fnpickle', 'fnunpickle'] def fnunpickle(fileorname, number=0, usecPickle=NoValue): """ Unpickle pickled objects from a specified file and return the contents. Parameters ---------- fileorname : str or file-like The file name or file from which to unpickle objects. If a file object, it should have been opened in binary mode. number : int If 0, a single object will be returned (the first in the file). If >0, this specifies the number of objects to be unpickled, and a list will be returned with exactly that many objects. If <0, all objects in the file will be unpickled and returned as a list. Raises ------ EOFError If ``number`` is >0 and there are fewer than ``number`` objects in the pickled file. Returns ------- contents : obj or list If ``number`` is 0, this is a individual object - the first one unpickled from the file. Otherwise, it is a list of objects unpickled from the file. """ if usecPickle is not NoValue: warnings.warn('The "usecPickle" keyword is now deprecated.', AstropyDeprecationWarning) import pickle if isinstance(fileorname, str): f = open(fileorname, 'rb') close = True else: f = fileorname close = False try: if number > 0: # get that number res = [] for i in range(number): res.append(pickle.load(f)) elif number < 0: # get all objects res = [] eof = False while not eof: try: res.append(pickle.load(f)) except EOFError: eof = True else: # number==0 res = pickle.load(f) finally: if close: f.close() return res def fnpickle(object, fileorname, usecPickle=NoValue, protocol=None, append=False): """Pickle an object to a specified file. Parameters ---------- object The python object to pickle. fileorname : str or file-like The filename or file into which the `object` should be pickled. If a file object, it should have been opened in binary mode. protocol : int or None Pickle protocol to use - see the :mod:`pickle` module for details on these options. If None, the most recent protocol will be used. append : bool If True, the object is appended to the end of the file, otherwise the file will be overwritten (if a file object is given instead of a file name, this has no effect). """ if usecPickle is not NoValue: warnings.warn('The "usecPickle" keyword is now deprecated.', AstropyDeprecationWarning) import pickle if protocol is None: protocol = pickle.HIGHEST_PROTOCOL if isinstance(fileorname, str): f = open(fileorname, 'ab' if append else 'wb') close = True else: f = fileorname close = False try: pickle.dump(object, f, protocol=protocol) finally: if close: f.close()
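

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the module
# API): pickle two objects to a temporary file, then read back the first one
# and then all of them.
def _fnpickle_roundtrip_example():
    import os
    import tempfile

    data1 = {'a': 1, 'b': [2, 3]}
    data2 = (4, 5, 6)

    fd, fname = tempfile.mkstemp(suffix='.pkl')
    os.close(fd)
    try:
        fnpickle(data1, fname)               # write the first object
        fnpickle(data2, fname, append=True)  # append a second one
        assert fnunpickle(fname) == data1                      # number=0: first object only
        assert fnunpickle(fname, number=-1) == [data1, data2]  # number<0: all objects
    finally:
        os.remove(fname)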
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os from copy import copy from io import StringIO import pytest import numpy as np from ..registry import _readers, _writers, _identifiers from .. import registry as io_registry from ...table import Table _READERS_ORIGINAL = copy(_readers) _WRITERS_ORIGINAL = copy(_writers) _IDENTIFIERS_ORIGINAL = copy(_identifiers) class TestData: read = classmethod(io_registry.read) write = io_registry.write def setup_function(function): _readers.clear() _writers.clear() _identifiers.clear() def empty_reader(*args, **kwargs): return TestData() def empty_writer(table, *args, **kwargs): pass def empty_identifier(*args, **kwargs): return True def test_get_reader_invalid(): with pytest.raises(io_registry.IORegistryError) as exc: io_registry.get_reader('test', TestData) assert str(exc.value).startswith( "No reader defined for format 'test' and class 'TestData'") def test_get_writer_invalid(): with pytest.raises(io_registry.IORegistryError) as exc: io_registry.get_writer('test', TestData) assert str(exc.value).startswith( "No writer defined for format 'test' and class 'TestData'") def test_register_reader(): io_registry.register_reader('test1', TestData, empty_reader) io_registry.register_reader('test2', TestData, empty_reader) assert io_registry.get_reader('test1', TestData) == empty_reader assert io_registry.get_reader('test2', TestData) == empty_reader io_registry.unregister_reader('test1', TestData) with pytest.raises(io_registry.IORegistryError): io_registry.get_reader('test1', TestData) assert io_registry.get_reader('test2', TestData) == empty_reader io_registry.unregister_reader('test2', TestData) with pytest.raises(io_registry.IORegistryError): io_registry.get_reader('test2', TestData) def test_register_writer(): io_registry.register_writer('test1', TestData, empty_writer) io_registry.register_writer('test2', TestData, empty_writer) assert io_registry.get_writer('test1', TestData) == empty_writer assert io_registry.get_writer('test2', TestData) == empty_writer io_registry.unregister_writer('test1', TestData) with pytest.raises(io_registry.IORegistryError): io_registry.get_writer('test1', TestData) assert io_registry.get_writer('test2', TestData) == empty_writer io_registry.unregister_writer('test2', TestData) with pytest.raises(io_registry.IORegistryError): io_registry.get_writer('test2', TestData) def test_register_identifier(): io_registry.register_identifier('test1', TestData, empty_identifier) io_registry.register_identifier('test2', TestData, empty_identifier) io_registry.unregister_identifier('test1', TestData) io_registry.unregister_identifier('test2', TestData) def test_register_reader_invalid(): io_registry.register_reader('test', TestData, empty_reader) with pytest.raises(io_registry.IORegistryError) as exc: io_registry.register_reader('test', TestData, empty_reader) assert (str(exc.value) == "Reader for format 'test' and class 'TestData' " "is already defined") def test_register_writer_invalid(): io_registry.register_writer('test', TestData, empty_writer) with pytest.raises(io_registry.IORegistryError) as exc: io_registry.register_writer('test', TestData, empty_writer) assert (str(exc.value) == "Writer for format 'test' and class 'TestData' " "is already defined") def test_register_identifier_invalid(): io_registry.register_identifier('test', TestData, empty_identifier) with pytest.raises(io_registry.IORegistryError) as exc: io_registry.register_identifier('test', TestData, empty_identifier) assert (str(exc.value) == 
"Identifier for format 'test' and class " "'TestData' is already defined") def test_unregister_reader_invalid(): with pytest.raises(io_registry.IORegistryError) as exc: io_registry.unregister_reader('test', TestData) assert str(exc.value) == "No reader defined for format 'test' and class 'TestData'" def test_unregister_writer_invalid(): with pytest.raises(io_registry.IORegistryError) as exc: io_registry.unregister_writer('test', TestData) assert str(exc.value) == "No writer defined for format 'test' and class 'TestData'" def test_unregister_identifier_invalid(): with pytest.raises(io_registry.IORegistryError) as exc: io_registry.unregister_identifier('test', TestData) assert str(exc.value) == "No identifier defined for format 'test' and class 'TestData'" def test_register_reader_force(): io_registry.register_reader('test', TestData, empty_reader) io_registry.register_reader('test', TestData, empty_reader, force=True) def test_register_writer_force(): io_registry.register_writer('test', TestData, empty_writer) io_registry.register_writer('test', TestData, empty_writer, force=True) def test_register_identifier_force(): io_registry.register_identifier('test', TestData, empty_identifier) io_registry.register_identifier('test', TestData, empty_identifier, force=True) def test_read_noformat(): with pytest.raises(io_registry.IORegistryError) as exc: TestData.read() assert str(exc.value).startswith("Format could not be identified.") def test_write_noformat(): with pytest.raises(io_registry.IORegistryError) as exc: TestData().write() assert str(exc.value).startswith("Format could not be identified.") def test_read_noformat_arbitrary(): """Test that all identifier functions can accept arbitrary input""" _identifiers.update(_IDENTIFIERS_ORIGINAL) with pytest.raises(io_registry.IORegistryError) as exc: TestData.read(object()) assert str(exc.value).startswith("Format could not be identified.") def test_read_noformat_arbitrary_file(tmpdir): """Tests that all identifier functions can accept arbitrary files""" _readers.update(_READERS_ORIGINAL) testfile = str(tmpdir.join('foo.example')) with open(testfile, 'w') as f: f.write("Hello world") with pytest.raises(io_registry.IORegistryError) as exc: Table.read(testfile) assert str(exc.value).startswith("Format could not be identified.") def test_write_noformat_arbitrary(): """Test that all identifier functions can accept arbitrary input""" _identifiers.update(_IDENTIFIERS_ORIGINAL) with pytest.raises(io_registry.IORegistryError) as exc: TestData().write(object()) assert str(exc.value).startswith("Format could not be identified.") def test_write_noformat_arbitrary_file(tmpdir): """Tests that all identifier functions can accept arbitrary files""" _writers.update(_WRITERS_ORIGINAL) testfile = str(tmpdir.join('foo.example')) with pytest.raises(io_registry.IORegistryError) as exc: Table().write(testfile) assert str(exc.value).startswith("Format could not be identified.") def test_read_toomanyformats(): io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True) io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True) with pytest.raises(io_registry.IORegistryError) as exc: TestData.read() assert str(exc.value) == "Format is ambiguous - options are: test1, test2" def test_write_toomanyformats(): io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True) io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True) with pytest.raises(io_registry.IORegistryError) as exc: TestData().write() assert 
str(exc.value) == "Format is ambiguous - options are: test1, test2" def test_read_format_noreader(): with pytest.raises(io_registry.IORegistryError) as exc: TestData.read(format='test') assert str(exc.value).startswith( "No reader defined for format 'test' and class 'TestData'") def test_write_format_nowriter(): with pytest.raises(io_registry.IORegistryError) as exc: TestData().write(format='test') assert str(exc.value).startswith( "No writer defined for format 'test' and class 'TestData'") def test_read_identifier(tmpdir): io_registry.register_identifier( 'test1', TestData, lambda o, path, fileobj, *x, **y: path.endswith('a')) io_registry.register_identifier( 'test2', TestData, lambda o, path, fileobj, *x, **y: path.endswith('b')) # Now check that we got past the identifier and are trying to get # the reader. The io_registry.get_reader will fail but the error message # will tell us if the identifier worked. filename = tmpdir.join("testfile.a").strpath open(filename, 'w').close() with pytest.raises(io_registry.IORegistryError) as exc: TestData.read(filename) assert str(exc.value).startswith( "No reader defined for format 'test1' and class 'TestData'") filename = tmpdir.join("testfile.b").strpath open(filename, 'w').close() with pytest.raises(io_registry.IORegistryError) as exc: TestData.read(filename) assert str(exc.value).startswith( "No reader defined for format 'test2' and class 'TestData'") def test_write_identifier(): io_registry.register_identifier('test1', TestData, lambda o, *x, **y: x[0].startswith('a')) io_registry.register_identifier('test2', TestData, lambda o, *x, **y: x[0].startswith('b')) # Now check that we got past the identifier and are trying to get # the reader. The io_registry.get_writer will fail but the error message # will tell us if the identifier worked. 
with pytest.raises(io_registry.IORegistryError) as exc: TestData().write('abc') assert str(exc.value).startswith( "No writer defined for format 'test1' and class 'TestData'") with pytest.raises(io_registry.IORegistryError) as exc: TestData().write('bac') assert str(exc.value).startswith( "No writer defined for format 'test2' and class 'TestData'") def test_identifier_origin(): io_registry.register_identifier('test1', TestData, lambda o, *x, **y: o == 'read') io_registry.register_identifier('test2', TestData, lambda o, *x, **y: o == 'write') io_registry.register_reader('test1', TestData, empty_reader) io_registry.register_writer('test2', TestData, empty_writer) # There should not be too many formats defined TestData.read() TestData().write() with pytest.raises(io_registry.IORegistryError) as exc: TestData.read(format='test2') assert str(exc.value).startswith( "No reader defined for format 'test2' and class 'TestData'") with pytest.raises(io_registry.IORegistryError) as exc: TestData().write(format='test1') assert str(exc.value).startswith( "No writer defined for format 'test1' and class 'TestData'") def test_read_valid_return(): io_registry.register_reader('test', TestData, lambda: TestData()) t = TestData.read(format='test') assert isinstance(t, TestData) def test_read_invalid_return(): io_registry.register_reader('test', TestData, lambda: 'spam') with pytest.raises(TypeError) as exc: TestData.read(format='test') assert str(exc.value) == "reader should return a TestData instance" def test_non_existing_unknown_ext(): """Raise the correct error when attempting to read a non-existing file with an unknown extension.""" with pytest.raises(OSError): data = Table.read('non-existing-file-with-unknown.ext') def test_read_basic_table(): data = np.array(list(zip([1, 2, 3], ['a', 'b', 'c'])), dtype=[(str('A'), int), (str('B'), '|U1')]) io_registry.register_reader('test', Table, lambda x: Table(x)) t = Table.read(data, format='test') assert t.keys() == ['A', 'B'] for i in range(3): assert t['A'][i] == data['A'][i] assert t['B'][i] == data['B'][i] def test_register_readers_with_same_name_on_different_classes(): # No errors should be generated if the same name is registered for # different objects...but this failed under python3 io_registry.register_reader('test', TestData, lambda: TestData()) io_registry.register_reader('test', Table, lambda: Table()) t = TestData.read(format='test') assert isinstance(t, TestData) tbl = Table.read(format='test') assert isinstance(tbl, Table) def test_inherited_registration(): # check that multi-generation inheritance works properly, # meaning that a child inherits from parents before # grandparents, see astropy/astropy#7156 class Child1(Table): pass class Child2(Child1): pass def _read(): return Table() def _read1(): return Child1() # check that reader gets inherited io_registry.register_reader('test', Table, _read) assert io_registry.get_reader('test', Child2) is _read # check that nearest ancestor is identified # (i.e. 
that the reader for Child2 is the registered method # for Child1, and not Table) io_registry.register_reader('test', Child1, _read1) assert io_registry.get_reader('test', Child2) is _read1 def teardown_function(function): _readers.update(_READERS_ORIGINAL) _writers.update(_WRITERS_ORIGINAL) _identifiers.update(_IDENTIFIERS_ORIGINAL) class TestSubclass: """ Test using registry with a Table sub-class """ def test_read_table_subclass(self): class MyTable(Table): pass data = ['a b', '1 2'] mt = MyTable.read(data, format='ascii') t = Table.read(data, format='ascii') assert np.all(mt == t) assert mt.colnames == t.colnames assert type(mt) is MyTable def test_write_table_subclass(self): buffer = StringIO() class MyTable(Table): pass mt = MyTable([[1], [2]], names=['a', 'b']) mt.write(buffer, format='ascii') assert buffer.getvalue() == os.linesep.join(['a b', '1 2', ''])
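

# Illustrative sketch (an addition, not one of the original tests): a minimal
# end-to-end use of the registry where an identifier picks the format from the
# filename extension and a matching reader is then used automatically.  The
# '.tst' extension and the format name are made up for this example.
def test_identifier_plus_reader_sketch(tmpdir):
    io_registry.register_identifier(
        'test', TestData,
        lambda o, path, fileobj, *x, **y: path is not None and path.endswith('.tst'))
    # The reader typically receives the opened file object from the registry
    # machinery; this stub simply ignores it and returns a TestData instance.
    io_registry.register_reader('test', TestData, lambda fileobj: TestData())

    filename = str(tmpdir.join('example.tst'))
    open(filename, 'w').close()

    t = TestData.read(filename)
    assert isinstance(t, TestData)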
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module handles the conversion of various VOTABLE datatypes to/from TABLEDATA_ and BINARY_ formats. """ # STDLIB import re import sys from struct import unpack as _struct_unpack from struct import pack as _struct_pack # THIRD-PARTY import numpy as np from numpy import ma # ASTROPY from ...utils.xml.writer import xml_escape_cdata # LOCAL from .exceptions import (vo_raise, vo_warn, warn_or_raise, W01, W30, W31, W39, W46, W47, W49, W51, E01, E02, E03, E04, E05, E06) __all__ = ['get_converter', 'Converter', 'table_column_to_votable_datatype'] pedantic_array_splitter = re.compile(r" +") array_splitter = re.compile(r"\s+|(?:\s*,\s*)") """ A regex to handle splitting values on either whitespace or commas. SPEC: Usage of commas is not actually allowed by the spec, but many files in the wild use them. """ _zero_int = b'\0\0\0\0' _empty_bytes = b'' _zero_byte = b'\0' struct_unpack = _struct_unpack struct_pack = _struct_pack if sys.byteorder == 'little': def _ensure_bigendian(x): if x.dtype.byteorder != '>': return x.byteswap() return x else: def _ensure_bigendian(x): if x.dtype.byteorder == '<': return x.byteswap() return x def _make_masked_array(data, mask): """ Masked arrays of zero length that also have a mask of zero length cause problems in Numpy (at least in 1.6.2). This function creates a masked array from data and a mask, unless it is zero length. """ # np.ma doesn't like setting mask to [] if len(data): return ma.array( np.array(data), mask=np.array(mask, dtype='bool')) else: return ma.array(np.array(data)) def bitarray_to_bool(data, length): """ Converts a bit array (a string of bits in a bytes object) to a boolean Numpy array. Parameters ---------- data : bytes The bit array. The most significant byte is read first. length : int The number of bits to read. The least significant bits in the data bytes beyond length will be ignored. Returns ------- array : numpy bool array """ results = [] for byte in data: for bit_no in range(7, -1, -1): bit = byte & (1 << bit_no) bit = (bit != 0) results.append(bit) if len(results) == length: break if len(results) == length: break return np.array(results, dtype='b1') def bool_to_bitarray(value): """ Converts a numpy boolean array to a bit array (a string of bits in a bytes object). Parameters ---------- value : numpy bool array Returns ------- bit_array : bytes The first value in the input array will be the most significant bit in the result. The length will be `floor((N + 7) / 8)` where `N` is the length of `value`. """ value = value.flat bit_no = 7 byte = 0 bytes = [] for v in value: if v: byte |= 1 << bit_no if bit_no == 0: bytes.append(byte) bit_no = 7 byte = 0 else: bit_no -= 1 if bit_no != 7: bytes.append(byte) return struct_pack("{}B".format(len(bytes)), *bytes) class Converter: """ The base class for all converters. Each subclass handles converting a specific VOTABLE data type to/from the TABLEDATA_ and BINARY_ on-disk representations. Parameters ---------- field : `~astropy.io.votable.tree.Field` object describing the datatype config : dict The parser configuration dictionary pos : tuple The position in the XML file where the FIELD object was found. Used for error messages. 
""" def __init__(self, field, config=None, pos=None): pass @staticmethod def _parse_length(read): return struct_unpack(">I", read(4))[0] @staticmethod def _write_length(length): return struct_pack(">I", int(length)) def supports_empty_values(self, config): """ Returns True when the field can be completely empty. """ return config.get('version_1_3_or_later') def parse(self, value, config=None, pos=None): """ Convert the string *value* from the TABLEDATA_ format into an object with the correct native in-memory datatype and mask flag. Parameters ---------- value : str value in TABLEDATA format Returns ------- native : tuple (value, mask) The value as a Numpy array or scalar, and *mask* is True if the value is missing. """ raise NotImplementedError( "This datatype must implement a 'parse' method.") def parse_scalar(self, value, config=None, pos=None): """ Parse a single scalar of the underlying type of the converter. For non-array converters, this is equivalent to parse. For array converters, this is used to parse a single element of the array. Parameters ---------- value : str value in TABLEDATA format Returns ------- native : tuple (value, mask) The value as a Numpy array or scalar, and *mask* is True if the value is missing. """ return self.parse(value, config, pos) def output(self, value, mask): """ Convert the object *value* (in the native in-memory datatype) to a unicode string suitable for serializing in the TABLEDATA_ format. Parameters ---------- value : native type corresponding to this converter The value mask : bool If `True`, will return the string representation of a masked value. Returns ------- tabledata_repr : unicode """ raise NotImplementedError( "This datatype must implement a 'output' method.") def binparse(self, read): """ Reads some number of bytes from the BINARY_ format representation by calling the function *read*, and returns the native in-memory object representation for the datatype handled by *self*. Parameters ---------- read : function A function that given a number of bytes, returns a byte string. Returns ------- native : tuple (value, mask) The value as a Numpy array or scalar, and *mask* is True if the value is missing. """ raise NotImplementedError( "This datatype must implement a 'binparse' method.") def binoutput(self, value, mask): """ Convert the object *value* in the native in-memory datatype to a string of bytes suitable for serialization in the BINARY_ format. Parameters ---------- value : native type corresponding to this converter The value mask : bool If `True`, will return the string representation of a masked value. Returns ------- bytes : byte string The binary representation of the value, suitable for serialization in the BINARY_ format. """ raise NotImplementedError( "This datatype must implement a 'binoutput' method.") class Char(Converter): """ Handles the char datatype. (7-bit unsigned characters) Missing values are not handled for string or unicode types. 
""" default = _empty_bytes def __init__(self, field, config=None, pos=None): if config is None: config = {} Converter.__init__(self, field, config, pos) if field.arraysize is None: vo_warn(W47, (), config, pos) field.arraysize = '1' if field.arraysize == '*': self.format = 'O' self.binparse = self._binparse_var self.binoutput = self._binoutput_var self.arraysize = '*' else: if field.arraysize.endswith('*'): field.arraysize = field.arraysize[:-1] try: self.arraysize = int(field.arraysize) except ValueError: vo_raise(E01, (field.arraysize, 'char', field.ID), config) self.format = 'S{:d}'.format(self.arraysize) self.binparse = self._binparse_fixed self.binoutput = self._binoutput_fixed self._struct_format = ">{:d}s".format(self.arraysize) if config.get('pedantic'): self.parse = self._ascii_parse else: self.parse = self._str_parse def supports_empty_values(self, config): return True def _ascii_parse(self, value, config=None, pos=None): if self.arraysize != '*' and len(value) > self.arraysize: vo_warn(W46, ('char', self.arraysize), config, pos) return value.encode('ascii'), False def _str_parse(self, value, config=None, pos=None): if self.arraysize != '*' and len(value) > self.arraysize: vo_warn(W46, ('char', self.arraysize), config, pos) return value.encode('utf-8'), False def output(self, value, mask): if mask: return '' if not isinstance(value, str): value = value.decode('ascii') return xml_escape_cdata(value) def _binparse_var(self, read): length = self._parse_length(read) return read(length), False def _binparse_fixed(self, read): s = struct_unpack(self._struct_format, read(self.arraysize))[0] end = s.find(_zero_byte) if end != -1: return s[:end], False return s, False def _binoutput_var(self, value, mask): if mask or value is None or value == '': return _zero_int return self._write_length(len(value)) + value def _binoutput_fixed(self, value, mask): if mask: value = _empty_bytes return struct_pack(self._struct_format, value) class UnicodeChar(Converter): """ Handles the unicodeChar data type. UTF-16-BE. Missing values are not handled for string or unicode types. 
""" default = '' def __init__(self, field, config=None, pos=None): Converter.__init__(self, field, config, pos) if field.arraysize is None: vo_warn(W47, (), config, pos) field.arraysize = '1' if field.arraysize == '*': self.format = 'O' self.binparse = self._binparse_var self.binoutput = self._binoutput_var self.arraysize = '*' else: try: self.arraysize = int(field.arraysize) except ValueError: vo_raise(E01, (field.arraysize, 'unicode', field.ID), config) self.format = 'U{:d}'.format(self.arraysize) self.binparse = self._binparse_fixed self.binoutput = self._binoutput_fixed self._struct_format = ">{:d}s".format(self.arraysize * 2) def parse(self, value, config=None, pos=None): if self.arraysize != '*' and len(value) > self.arraysize: vo_warn(W46, ('unicodeChar', self.arraysize), config, pos) return value, False def output(self, value, mask): if mask: return '' return xml_escape_cdata(str(value)) def _binparse_var(self, read): length = self._parse_length(read) return read(length * 2).decode('utf_16_be'), False def _binparse_fixed(self, read): s = struct_unpack(self._struct_format, read(self.arraysize * 2))[0] s = s.decode('utf_16_be') end = s.find('\0') if end != -1: return s[:end], False return s, False def _binoutput_var(self, value, mask): if mask or value is None or value == '': return _zero_int encoded = value.encode('utf_16_be') return self._write_length(len(encoded) / 2) + encoded def _binoutput_fixed(self, value, mask): if mask: value = '' return struct_pack(self._struct_format, value.encode('utf_16_be')) class Array(Converter): """ Handles both fixed and variable-lengths arrays. """ def __init__(self, field, config=None, pos=None): if config is None: config = {} Converter.__init__(self, field, config, pos) if config.get('pedantic'): self._splitter = self._splitter_pedantic else: self._splitter = self._splitter_lax def parse_scalar(self, value, config=None, pos=0): return self._base.parse_scalar(value, config, pos) @staticmethod def _splitter_pedantic(value, config=None, pos=None): return pedantic_array_splitter.split(value) @staticmethod def _splitter_lax(value, config=None, pos=None): if ',' in value: vo_warn(W01, (), config, pos) return array_splitter.split(value) class VarArray(Array): """ Handles variable lengths arrays (i.e. where *arraysize* is '*'). """ format = 'O' def __init__(self, field, base, arraysize, config=None, pos=None): Array.__init__(self, field, config) self._base = base self.default = np.array([], dtype=self._base.format) def output(self, value, mask): output = self._base.output result = [output(x, m) for x, m in np.broadcast(value, mask)] return ' '.join(result) def binparse(self, read): length = self._parse_length(read) result = [] result_mask = [] binparse = self._base.binparse for i in range(length): val, mask = binparse(read) result.append(val) result_mask.append(mask) return _make_masked_array(result, result_mask), False def binoutput(self, value, mask): if value is None or len(value) == 0: return _zero_int length = len(value) result = [self._write_length(length)] binoutput = self._base.binoutput for x, m in zip(value, value.mask): result.append(binoutput(x, m)) return _empty_bytes.join(result) class ArrayVarArray(VarArray): """ Handles an array of variable-length arrays, i.e. where *arraysize* ends in '*'. 
""" def parse(self, value, config=None, pos=None): if value.strip() == '': return ma.array([]), False parts = self._splitter(value, config, pos) items = self._base._items parse_parts = self._base.parse_parts if len(parts) % items != 0: vo_raise(E02, (items, len(parts)), config, pos) result = [] result_mask = [] for i in range(0, len(parts), items): value, mask = parse_parts(parts[i:i+items], config, pos) result.append(value) result_mask.append(mask) return _make_masked_array(result, result_mask), False class ScalarVarArray(VarArray): """ Handles a variable-length array of numeric scalars. """ def parse(self, value, config=None, pos=None): if value.strip() == '': return ma.array([]), False parts = self._splitter(value, config, pos) parse = self._base.parse result = [] result_mask = [] for x in parts: value, mask = parse(x, config, pos) result.append(value) result_mask.append(mask) return _make_masked_array(result, result_mask), False class NumericArray(Array): """ Handles a fixed-length array of numeric scalars. """ vararray_type = ArrayVarArray def __init__(self, field, base, arraysize, config=None, pos=None): Array.__init__(self, field, config, pos) self._base = base self._arraysize = arraysize self.format = "{}{}".format(tuple(arraysize), base.format) self._items = 1 for dim in arraysize: self._items *= dim self._memsize = np.dtype(self.format).itemsize self._bigendian_format = '>' + self.format self.default = np.empty(arraysize, dtype=self._base.format) self.default[...] = self._base.default def parse(self, value, config=None, pos=None): if config is None: config = {} elif config['version_1_3_or_later'] and value == '': return np.zeros(self._arraysize, dtype=self._base.format), True parts = self._splitter(value, config, pos) if len(parts) != self._items: warn_or_raise(E02, E02, (self._items, len(parts)), config, pos) if config.get('pedantic'): return self.parse_parts(parts, config, pos) else: if len(parts) == self._items: pass elif len(parts) > self._items: parts = parts[:self._items] else: parts = (parts + ([self._base.default] * (self._items - len(parts)))) return self.parse_parts(parts, config, pos) def parse_parts(self, parts, config=None, pos=None): base_parse = self._base.parse result = [] result_mask = [] for x in parts: value, mask = base_parse(x, config, pos) result.append(value) result_mask.append(mask) result = np.array(result, dtype=self._base.format).reshape( self._arraysize) result_mask = np.array(result_mask, dtype='bool').reshape( self._arraysize) return result, result_mask def output(self, value, mask): base_output = self._base.output value = np.asarray(value) mask = np.asarray(mask) return ' '.join(base_output(x, m) for x, m in zip(value.flat, mask.flat)) def binparse(self, read): result = np.frombuffer(read(self._memsize), dtype=self._bigendian_format)[0] result_mask = self._base.is_null(result) return result, result_mask def binoutput(self, value, mask): filtered = self._base.filter_array(value, mask) filtered = _ensure_bigendian(filtered) return filtered.tostring() class Numeric(Converter): """ The base class for all numeric data types. 
""" array_type = NumericArray vararray_type = ScalarVarArray null = None def __init__(self, field, config=None, pos=None): Converter.__init__(self, field, config, pos) self._memsize = np.dtype(self.format).itemsize self._bigendian_format = '>' + self.format if field.values.null is not None: self.null = np.asarray(field.values.null, dtype=self.format) self.default = self.null self.is_null = self._is_null else: self.is_null = np.isnan def binparse(self, read): result = np.frombuffer(read(self._memsize), dtype=self._bigendian_format) return result[0], self.is_null(result[0]) def _is_null(self, value): return value == self.null class FloatingPoint(Numeric): """ The base class for floating-point datatypes. """ default = np.nan def __init__(self, field, config=None, pos=None): if config is None: config = {} Numeric.__init__(self, field, config, pos) precision = field.precision width = field.width if precision is None: format_parts = ['{!r:>'] else: format_parts = ['{:'] if width is not None: format_parts.append(str(width)) if precision is not None: if precision.startswith("E"): format_parts.append('.{:d}g'.format(int(precision[1:]))) elif precision.startswith("F"): format_parts.append('.{:d}f'.format(int(precision[1:]))) else: format_parts.append('.{:d}f'.format(int(precision))) format_parts.append('}') self._output_format = ''.join(format_parts) self.nan = np.array(np.nan, self.format) if self.null is None: self._null_output = 'NaN' self._null_binoutput = self.binoutput(self.nan, False) self.filter_array = self._filter_nan else: self._null_output = self.output(np.asarray(self.null), False) self._null_binoutput = self.binoutput(np.asarray(self.null), False) self.filter_array = self._filter_null if config.get('pedantic'): self.parse = self._parse_pedantic else: self.parse = self._parse_permissive def supports_empty_values(self, config): return True def _parse_pedantic(self, value, config=None, pos=None): if value.strip() == '': return self.null, True f = float(value) return f, self.is_null(f) def _parse_permissive(self, value, config=None, pos=None): try: f = float(value) return f, self.is_null(f) except ValueError: # IRSA VOTables use the word 'null' to specify empty values, # but this is not defined in the VOTable spec. if value.strip() != '': vo_warn(W30, value, config, pos) return self.null, True @property def output_format(self): return self._output_format def output(self, value, mask): if mask: return self._null_output if np.isfinite(value): if not np.isscalar(value): value = value.dtype.type(value) result = self._output_format.format(value) if result.startswith('array'): raise RuntimeError() if (self._output_format[2] == 'r' and result.endswith('.0')): result = result[:-2] return result elif np.isnan(value): return 'NaN' elif np.isposinf(value): return '+InF' elif np.isneginf(value): return '-InF' # Should never raise vo_raise("Invalid floating point value '{}'".format(value)) def binoutput(self, value, mask): if mask: return self._null_binoutput value = _ensure_bigendian(value) return value.tostring() def _filter_nan(self, value, mask): return np.where(mask, np.nan, value) def _filter_null(self, value, mask): return np.where(mask, self.null, value) class Double(FloatingPoint): """ Handles the double datatype. Double-precision IEEE floating-point. """ format = 'f8' class Float(FloatingPoint): """ Handles the float datatype. Single-precision IEEE floating-point. """ format = 'f4' class Integer(Numeric): """ The base class for all the integral datatypes. 
""" default = 0 def __init__(self, field, config=None, pos=None): Numeric.__init__(self, field, config, pos) def parse(self, value, config=None, pos=None): if config is None: config = {} mask = False if isinstance(value, str): value = value.lower() if value == '': if config['version_1_3_or_later']: mask = True else: warn_or_raise(W49, W49, (), config, pos) if self.null is not None: value = self.null else: value = self.default elif value == 'nan': mask = True if self.null is None: warn_or_raise(W31, W31, (), config, pos) value = self.default else: value = self.null elif value.startswith('0x'): value = int(value[2:], 16) else: value = int(value, 10) else: value = int(value) if self.null is not None and value == self.null: mask = True if value < self.val_range[0]: warn_or_raise(W51, W51, (value, self.bit_size), config, pos) value = self.val_range[0] elif value > self.val_range[1]: warn_or_raise(W51, W51, (value, self.bit_size), config, pos) value = self.val_range[1] return value, mask def output(self, value, mask): if mask: if self.null is None: warn_or_raise(W31, W31) return 'NaN' return str(self.null) return str(value) def binoutput(self, value, mask): if mask: if self.null is None: vo_raise(W31) else: value = self.null value = _ensure_bigendian(value) return value.tostring() def filter_array(self, value, mask): if np.any(mask): if self.null is not None: return np.where(mask, self.null, value) else: vo_raise(W31) return value class UnsignedByte(Integer): """ Handles the unsignedByte datatype. Unsigned 8-bit integer. """ format = 'u1' val_range = (0, 255) bit_size = '8-bit unsigned' class Short(Integer): """ Handles the short datatype. Signed 16-bit integer. """ format = 'i2' val_range = (-32768, 32767) bit_size = '16-bit' class Int(Integer): """ Handles the int datatype. Signed 32-bit integer. """ format = 'i4' val_range = (-2147483648, 2147483647) bit_size = '32-bit' class Long(Integer): """ Handles the long datatype. Signed 64-bit integer. """ format = 'i8' val_range = (-9223372036854775808, 9223372036854775807) bit_size = '64-bit' class ComplexArrayVarArray(VarArray): """ Handles an array of variable-length arrays of complex numbers. """ def parse(self, value, config=None, pos=None): if value.strip() == '': return ma.array([]), True parts = self._splitter(value, config, pos) items = self._base._items parse_parts = self._base.parse_parts if len(parts) % items != 0: vo_raise(E02, (items, len(parts)), config, pos) result = [] result_mask = [] for i in range(0, len(parts), items): value, mask = parse_parts(parts[i:i + items], config, pos) result.append(value) result_mask.append(mask) return _make_masked_array(result, result_mask), False class ComplexVarArray(VarArray): """ Handles a variable-length array of complex numbers. """ def parse(self, value, config=None, pos=None): if value.strip() == '': return ma.array([]), True parts = self._splitter(value, config, pos) parse_parts = self._base.parse_parts result = [] result_mask = [] for i in range(0, len(parts), 2): value = [float(x) for x in parts[i:i + 2]] value, mask = parse_parts(value, config, pos) result.append(value) result_mask.append(mask) return _make_masked_array( np.array(result, dtype=self._base.format), result_mask), False class ComplexArray(NumericArray): """ Handles a fixed-size array of complex numbers. 
""" vararray_type = ComplexArrayVarArray def __init__(self, field, base, arraysize, config=None, pos=None): NumericArray.__init__(self, field, base, arraysize, config, pos) self._items *= 2 def parse(self, value, config=None, pos=None): parts = self._splitter(value, config, pos) if parts == ['']: parts = [] return self.parse_parts(parts, config, pos) def parse_parts(self, parts, config=None, pos=None): if len(parts) != self._items: vo_raise(E02, (self._items, len(parts)), config, pos) base_parse = self._base.parse_parts result = [] result_mask = [] for i in range(0, self._items, 2): value = [float(x) for x in parts[i:i + 2]] value, mask = base_parse(value, config, pos) result.append(value) result_mask.append(mask) result = np.array( result, dtype=self._base.format).reshape(self._arraysize) result_mask = np.array( result_mask, dtype='bool').reshape(self._arraysize) return result, result_mask class Complex(FloatingPoint, Array): """ The base class for complex numbers. """ array_type = ComplexArray vararray_type = ComplexVarArray default = np.nan def __init__(self, field, config=None, pos=None): FloatingPoint.__init__(self, field, config, pos) Array.__init__(self, field, config, pos) def parse(self, value, config=None, pos=None): stripped = value.strip() if stripped == '' or stripped.lower() == 'nan': return np.nan, True splitter = self._splitter parts = [float(x) for x in splitter(value, config, pos)] if len(parts) != 2: vo_raise(E03, (value,), config, pos) return self.parse_parts(parts, config, pos) _parse_permissive = parse _parse_pedantic = parse def parse_parts(self, parts, config=None, pos=None): value = complex(*parts) return value, self.is_null(value) def output(self, value, mask): if mask: if self.null is None: return 'NaN' else: value = self.null real = self._output_format.format(float(value.real)) imag = self._output_format.format(float(value.imag)) if self._output_format[2] == 'r': if real.endswith('.0'): real = real[:-2] if imag.endswith('.0'): imag = imag[:-2] return real + ' ' + imag class FloatComplex(Complex): """ Handle floatComplex datatype. Pair of single-precision IEEE floating-point numbers. """ format = 'c8' class DoubleComplex(Complex): """ Handle doubleComplex datatype. Pair of double-precision IEEE floating-point numbers. """ format = 'c16' class BitArray(NumericArray): """ Handles an array of bits. """ vararray_type = ArrayVarArray def __init__(self, field, base, arraysize, config=None, pos=None): NumericArray.__init__(self, field, base, arraysize, config, pos) self._bytes = ((self._items - 1) // 8) + 1 @staticmethod def _splitter_pedantic(value, config=None, pos=None): return list(re.sub(r'\s', '', value)) @staticmethod def _splitter_lax(value, config=None, pos=None): if ',' in value: vo_warn(W01, (), config, pos) return list(re.sub(r'\s|,', '', value)) def output(self, value, mask): if np.any(mask): vo_warn(W39) value = np.asarray(value) mapping = {False: '0', True: '1'} return ''.join(mapping[x] for x in value.flat) def binparse(self, read): data = read(self._bytes) result = bitarray_to_bool(data, self._items) result = result.reshape(self._arraysize) result_mask = np.zeros(self._arraysize, dtype='b1') return result, result_mask def binoutput(self, value, mask): if np.any(mask): vo_warn(W39) return bool_to_bitarray(value) class Bit(Converter): """ Handles the bit datatype. 
""" format = 'b1' array_type = BitArray vararray_type = ScalarVarArray default = False binary_one = b'\x08' binary_zero = b'\0' def parse(self, value, config=None, pos=None): if config is None: config = {} mapping = {'1': True, '0': False} if value is False or value.strip() == '': if not config['version_1_3_or_later']: warn_or_raise(W49, W49, (), config, pos) return False, True else: try: return mapping[value], False except KeyError: vo_raise(E04, (value,), config, pos) def output(self, value, mask): if mask: vo_warn(W39) if value: return '1' else: return '0' def binparse(self, read): data = read(1) return (ord(data) & 0x8) != 0, False def binoutput(self, value, mask): if mask: vo_warn(W39) if value: return self.binary_one return self.binary_zero class BooleanArray(NumericArray): """ Handles an array of boolean values. """ vararray_type = ArrayVarArray def binparse(self, read): data = read(self._items) binparse = self._base.binparse_value result = [] result_mask = [] for char in data: value, mask = binparse(char) result.append(value) result_mask.append(mask) result = np.array(result, dtype='b1').reshape( self._arraysize) result_mask = np.array(result_mask, dtype='b1').reshape( self._arraysize) return result, result_mask def binoutput(self, value, mask): binoutput = self._base.binoutput value = np.asarray(value) mask = np.asarray(mask) result = [binoutput(x, m) for x, m in np.broadcast(value.flat, mask.flat)] return _empty_bytes.join(result) class Boolean(Converter): """ Handles the boolean datatype. """ format = 'b1' array_type = BooleanArray vararray_type = ScalarVarArray default = False binary_question_mark = b'?' binary_true = b'T' binary_false = b'F' def parse(self, value, config=None, pos=None): if value == '': return False, True if value is False: return False, True mapping = {'TRUE': (True, False), 'FALSE': (False, False), '1': (True, False), '0': (False, False), 'T': (True, False), 'F': (False, False), '\0': (False, True), ' ': (False, True), '?': (False, True), '': (False, True)} try: return mapping[value.upper()] except KeyError: vo_raise(E05, (value,), config, pos) def output(self, value, mask): if mask: return '?' if value: return 'T' return 'F' def binparse(self, read): value = ord(read(1)) return self.binparse_value(value) _binparse_mapping = { ord('T'): (True, False), ord('t'): (True, False), ord('1'): (True, False), ord('F'): (False, False), ord('f'): (False, False), ord('0'): (False, False), ord('\0'): (False, True), ord(' '): (False, True), ord('?'): (False, True)} def binparse_value(self, value): try: return self._binparse_mapping[value] except KeyError: vo_raise(E05, (value,)) def binoutput(self, value, mask): if mask: return self.binary_question_mark if value: return self.binary_true return self.binary_false converter_mapping = { 'double': Double, 'float': Float, 'bit': Bit, 'boolean': Boolean, 'unsignedByte': UnsignedByte, 'short': Short, 'int': Int, 'long': Long, 'floatComplex': FloatComplex, 'doubleComplex': DoubleComplex, 'char': Char, 'unicodeChar': UnicodeChar} def get_converter(field, config=None, pos=None): """ Get an appropriate converter instance for a given field. Parameters ---------- field : astropy.io.votable.tree.Field config : dict, optional Parser configuration dictionary pos : tuple Position in the input XML file. Used for error messages. 
Returns ------- converter : astropy.io.votable.converters.Converter """ if config is None: config = {} if field.datatype not in converter_mapping: vo_raise(E06, (field.datatype, field.ID), config) cls = converter_mapping[field.datatype] converter = cls(field, config, pos) arraysize = field.arraysize # With numeric datatypes, special things need to happen for # arrays. if (field.datatype not in ('char', 'unicodeChar') and arraysize is not None): if arraysize[-1] == '*': arraysize = arraysize[:-1] last_x = arraysize.rfind('x') if last_x == -1: arraysize = '' else: arraysize = arraysize[:last_x] fixed = False else: fixed = True if arraysize != '': arraysize = [int(x) for x in arraysize.split("x")] arraysize.reverse() else: arraysize = [] if arraysize != []: converter = converter.array_type( field, converter, arraysize, config) if not fixed: converter = converter.vararray_type( field, converter, arraysize, config) return converter numpy_dtype_to_field_mapping = { np.float64().dtype.num: 'double', np.float32().dtype.num: 'float', np.bool_().dtype.num: 'bit', np.uint8().dtype.num: 'unsignedByte', np.int16().dtype.num: 'short', np.int32().dtype.num: 'int', np.int64().dtype.num: 'long', np.complex64().dtype.num: 'floatComplex', np.complex128().dtype.num: 'doubleComplex', np.unicode_().dtype.num: 'unicodeChar' } numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = 'char' def _all_bytes(column): for x in column: if not isinstance(x, bytes): return False return True def _all_unicode(column): for x in column: if not isinstance(x, str): return False return True def _all_matching_dtype(column): first_dtype = False first_shape = () for x in column: if not isinstance(x, np.ndarray) or len(x) == 0: continue if first_dtype is False: first_dtype = x.dtype first_shape = x.shape[1:] elif first_dtype != x.dtype: return False, () elif first_shape != x.shape[1:]: first_shape = () return first_dtype, first_shape def numpy_to_votable_dtype(dtype, shape): """ Converts a numpy dtype and shape to a dictionary of attributes for a VOTable FIELD element and correspond to that type. Parameters ---------- dtype : Numpy dtype instance shape : tuple Returns ------- attributes : dict A dict containing 'datatype' and 'arraysize' keys that can be set on a VOTable FIELD element. """ if dtype.num not in numpy_dtype_to_field_mapping: raise TypeError( "{0!r} can not be represented in VOTable".format(dtype)) if dtype.char == 'S': return {'datatype': 'char', 'arraysize': str(dtype.itemsize)} elif dtype.char == 'U': return {'datatype': 'unicodeChar', 'arraysize': str(dtype.itemsize // 4)} else: result = { 'datatype': numpy_dtype_to_field_mapping[dtype.num]} if len(shape): result['arraysize'] = 'x'.join(str(x) for x in shape) return result def table_column_to_votable_datatype(column): """ Given a `astropy.table.Column` instance, returns the attributes necessary to create a VOTable FIELD element that corresponds to the type of the column. This necessarily must perform some heuristics to determine the type of variable length arrays fields, since they are not directly supported by Numpy. If the column has dtype of "object", it performs the following tests: - If all elements are byte or unicode strings, it creates a variable-length byte or unicode field, respectively. - If all elements are numpy arrays of the same dtype and with a consistent shape in all but the first dimension, it creates a variable length array of fixed sized arrays. If the dtypes match, but the shapes do not, a variable length array is created. 
If the dtype of the input is not understood, it sets the data type to the most inclusive: a variable length unicodeChar array. Parameters ---------- column : `astropy.table.Column` instance Returns ------- attributes : dict A dict containing 'datatype' and 'arraysize' keys that can be set on a VOTable FIELD element. """ if column.dtype.char == 'O': if isinstance(column[0], bytes): if _all_bytes(column[1:]): return {'datatype': 'char', 'arraysize': '*'} elif isinstance(column[0], str): if _all_unicode(column[1:]): return {'datatype': 'unicodeChar', 'arraysize': '*'} elif isinstance(column[0], np.ndarray): dtype, shape = _all_matching_dtype(column) if dtype is not False: result = numpy_to_votable_dtype(dtype, shape) if 'arraysize' not in result: result['arraysize'] = '*' else: result['arraysize'] += '*' return result # All bets are off, do the most generic thing return {'datatype': 'unicodeChar', 'arraysize': '*'} return numpy_to_votable_dtype(column.dtype, column.shape[1:])
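

# The function below is an illustrative sketch only: its name is made up
# and it is not called anywhere in the library.  It shows how the mapping
# helpers above translate numpy dtypes into VOTable FIELD attributes.
def _example_numpy_to_votable_mapping():  # pragma: no cover
    # Scalar float64 values map to the VOTable 'double' datatype with no
    # arraysize.
    assert numpy_to_votable_dtype(np.dtype('float64'), ()) == {
        'datatype': 'double'}
    # Fixed-size multidimensional cells additionally get an 'arraysize'
    # string with the dimensions joined by 'x'.
    assert numpy_to_votable_dtype(np.dtype('int16'), (3, 2)) == {
        'datatype': 'short', 'arraysize': '3x2'}
    # Byte strings become 'char' fields whose arraysize is the itemsize.
    assert numpy_to_votable_dtype(np.dtype('S8'), ()) == {
        'datatype': 'char', 'arraysize': '8'}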
d27395af69224627f5e645156a03b53c07087c9d8c5af210215770f8b4f9e56f
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os from . import parse, from_table from .tree import VOTableFile, Table as VOTable from .. import registry as io_registry from ...table import Table from ...table.column import BaseColumn from ...units import Quantity def is_votable(origin, filepath, fileobj, *args, **kwargs): """ Reads the header of a file to determine if it is a VOTable file. Parameters ---------- origin : str or readable file-like object Path or file object containing a VOTABLE_ xml file. Returns ------- is_votable : bool Returns `True` if the given file is a VOTable file. """ from . import is_votable if origin == 'read': if fileobj is not None: try: result = is_votable(fileobj) finally: fileobj.seek(0) return result elif filepath is not None: return is_votable(filepath) elif isinstance(args[0], (VOTableFile, VOTable)): return True else: return False else: return False def read_table_votable(input, table_id=None, use_names_over_ids=False): """ Read a Table object from an VO table file Parameters ---------- input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.Table` If a string, the filename to read the table from. If a :class:`~astropy.io.votable.tree.VOTableFile` or :class:`~astropy.io.votable.tree.Table` object, the object to extract the table from. table_id : str or int, optional The table to read in. If a `str`, it is an ID corresponding to the ID of the table in the file (not all VOTable files assign IDs to their tables). If an `int`, it is the index of the table in the file, starting at 0. use_names_over_ids : bool, optional When `True` use the ``name`` attributes of columns as the names of columns in the `~astropy.table.Table` instance. Since names are not guaranteed to be unique, this may cause some columns to be renamed by appending numbers to the end. Otherwise (default), use the ID attributes as the column names. """ if not isinstance(input, (VOTableFile, VOTable)): input = parse(input, table_id=table_id) # Parse all table objects table_id_mapping = dict() tables = [] if isinstance(input, VOTableFile): for table in input.iter_tables(): if table.ID is not None: table_id_mapping[table.ID] = table tables.append(table) if len(tables) > 1: if table_id is None: raise ValueError( "Multiple tables found: table id should be set via " "the table_id= argument. The available tables are {0}, " 'or integers less than {1}.'.format( ', '.join(table_id_mapping.keys()), len(tables))) elif isinstance(table_id, str): if table_id in table_id_mapping: table = table_id_mapping[table_id] else: raise ValueError( "No tables with id={0} found".format(table_id)) elif isinstance(table_id, int): if table_id < len(tables): table = tables[table_id] else: raise IndexError( "Table index {0} is out of range. " "{1} tables found".format( table_id, len(tables))) elif len(tables) == 1: table = tables[0] else: raise ValueError("No table found") elif isinstance(input, VOTable): table = input # Convert to an astropy.table.Table object return table.to_table(use_names_over_ids=use_names_over_ids) def write_table_votable(input, output, table_id=None, overwrite=False, tabledata_format=None): """ Write a Table object to an VO table file Parameters ---------- input : Table The table to write out. output : str The filename to write the table to. table_id : str, optional The table ID to use. If this is not specified, the 'ID' keyword in the ``meta`` object of the table will be used. 
overwrite : bool, optional Whether to overwrite any existing file without warning. tabledata_format : str, optional The format of table data to write. Must be one of ``tabledata`` (text representation), ``binary`` or ``binary2``. Default is ``tabledata``. See :ref:`votable-serialization`. """ # Only those columns which are instances of BaseColumn or Quantity can be written unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity)) if unsupported_cols: unsupported_names = [col.info.name for col in unsupported_cols] raise ValueError('cannot write table with mixin column(s) {0} to VOTable' .format(unsupported_names)) # Check if output file already exists if isinstance(output, str) and os.path.exists(output): if overwrite: os.remove(output) else: raise OSError("File exists: {0}".format(output)) # Create a new VOTable file table_file = from_table(input, table_id=table_id) # Write out file table_file.to_xml(output, tabledata_format=tabledata_format) io_registry.register_reader('votable', Table, read_table_votable) io_registry.register_writer('votable', Table, write_table_votable) io_registry.register_identifier('votable', Table, is_votable)
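

# The function below is an illustrative sketch only: its name and the
# default file path are made up, and nothing here runs on import.  It
# shows the unified Table I/O interface enabled by the registrations
# above.
def _example_votable_round_trip(path='example.vot'):  # pragma: no cover
    t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    # format='votable' dispatches to write_table_votable() above.
    t.write(path, format='votable', overwrite=True,
            tabledata_format='tabledata')
    # ...and reading dispatches to read_table_votable().
    return Table.read(path, format='votable')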
f36676c04c08c9d9ffa271f675190ce95d71ab5bddad9d15a5de10854ec2cba4
# Licensed under a 3-clause BSD style license - see LICENSE.rst # TODO: Test FITS parsing # STDLIB import io import re import sys import gzip import base64 import codecs import urllib.request import warnings # THIRD-PARTY import numpy as np from numpy import ma # LOCAL from .. import fits from ... import __version__ as astropy_version from ...utils.collections import HomogeneousList from ...utils.xml.writer import XMLWriter from ...utils.exceptions import AstropyDeprecationWarning from ...utils.misc import InheritDocstrings from . import converters from .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise, warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12, W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28, W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43, W44, W45, W50, W52, W53, E06, E08, E09, E10, E11, E12, E13, E15, E16, E17, E18, E19, E20, E21) from . import ucd as ucd_mod from . import util from . import xmlutil try: from . import tablewriter _has_c_tabledata_writer = True except ImportError: _has_c_tabledata_writer = False __all__ = [ 'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys', 'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource', 'VOTableFile' ] # The default number of rows to read in each chunk before converting # to an array. DEFAULT_CHUNK_SIZE = 256 RESIZE_AMOUNT = 1.5 ###################################################################### # FACTORY FUNCTIONS def _resize(masked, new_size): """ Masked arrays can not be resized inplace, and `np.resize` and `ma.resize` are both incompatible with structured arrays. Therefore, we do all this. """ new_array = ma.zeros((new_size,), dtype=masked.dtype) length = min(len(masked), new_size) new_array[:length] = masked[:length] return new_array def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc): """ Creates a function useful for looking up an element by a given attribute. Parameters ---------- attr : str The attribute name unique : bool Should be `True` if the attribute is unique and therefore this should return only one value. Otherwise, returns a list of values. iterator : generator A generator that iterates over some arbitrary set of elements element_name : str The XML element name of the elements being iterated over (used for error messages only). doc : str A docstring to apply to the generated function. Returns ------- factory : function A function that looks up an element by the given attribute. """ def lookup_by_attr(self, ref, before=None): """ Given a string *ref*, finds the first element in the iterator where the given attribute == *ref*. If *before* is provided, will stop searching at the object *before*. This is important, since "forward references" are not allowed in the VOTABLE format. """ for element in getattr(self, iterator)(): if element is before: if getattr(element, attr, None) == ref: vo_raise( "{} references itself".format(element_name), element._config, element._pos, KeyError) break if getattr(element, attr, None) == ref: yield element def lookup_by_attr_unique(self, ref, before=None): for element in lookup_by_attr(self, ref, before=before): return element raise KeyError( "No {} with {} '{}' found before the referencing {}".format( element_name, attr, ref, element_name)) if unique: lookup_by_attr_unique.__doc__ = doc return lookup_by_attr_unique else: lookup_by_attr.__doc__ = doc return lookup_by_attr def _lookup_by_id_or_name_factory(iterator, element_name, doc): """ Like `_lookup_by_attr_factory`, but looks in both the "ID" and "name" attributes. 
""" def lookup_by_id_or_name(self, ref, before=None): """ Given an key *ref*, finds the first element in the iterator with the attribute ID == *ref* or name == *ref*. If *before* is provided, will stop searching at the object *before*. This is important, since "forward references" are not allowed in the VOTABLE format. """ for element in getattr(self, iterator)(): if element is before: if ref in (element.ID, element.name): vo_raise( "{} references itself".format(element_name), element._config, element._pos, KeyError) break if ref in (element.ID, element.name): return element raise KeyError( "No {} with ID or name '{}' found before the referencing {}".format( element_name, ref, element_name)) lookup_by_id_or_name.__doc__ = doc return lookup_by_id_or_name def _get_default_unit_format(config): """ Get the default unit format as specified in the VOTable spec. """ # In the future, this should take into account the VOTable # version. return 'cds' def _get_unit_format(config): """ Get the unit format based on the configuration. """ if config.get('unit_format') is None: format = _get_default_unit_format(config) else: format = config['unit_format'] return format ###################################################################### # ATTRIBUTE CHECKERS def check_astroyear(year, field, config=None, pos=None): """ Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *year* is not a valid astronomical year as defined by the VOTABLE standard. Parameters ---------- year : str An astronomical year string field : str The name of the field this year was found in (used for error message) config, pos : optional Information about the source of the value """ if (year is not None and re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None): warn_or_raise(W07, W07, (field, year), config, pos) return False return True def check_string(string, attr_name, config=None, pos=None): """ Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *string* is not a string or Unicode string. Parameters ---------- string : str An astronomical year string attr_name : str The name of the field this year was found in (used for error message) config, pos : optional Information about the source of the value """ if string is not None and not isinstance(string, str): warn_or_raise(W08, W08, attr_name, config, pos) return False return True def resolve_id(ID, id, config=None, pos=None): if ID is None and id is not None: warn_or_raise(W09, W09, (), config, pos) return id return ID def check_ucd(ucd, config=None, pos=None): """ Warns or raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not a valid `unified content descriptor`_ string as defined by the VOTABLE standard. Parameters ---------- ucd : str A UCD string. config, pos : optional Information about the source of the value """ if config is None: config = {} if config.get('version_1_1_or_later'): try: ucd_mod.parse_ucd( ucd, check_controlled_vocabulary=config.get( 'version_1_2_or_later', False), has_colon=config.get('version_1_2_or_later', False)) except ValueError as e: # This weird construction is for Python 3 compatibility if config.get('pedantic'): vo_raise(W06, (ucd, str(e)), config, pos) else: vo_warn(W06, (ucd, str(e)), config, pos) return False return True ###################################################################### # PROPERTY MIXINS class _IDProperty: @property def ID(self): """ The XML ID_ of the element. May be `None` or a string conforming to XML ID_ syntax. 
""" return self._ID @ID.setter def ID(self, ID): xmlutil.check_id(ID, 'ID', self._config, self._pos) self._ID = ID @ID.deleter def ID(self): self._ID = None class _NameProperty: @property def name(self): """An optional name for the element.""" return self._name @name.setter def name(self, name): xmlutil.check_token(name, 'name', self._config, self._pos) self._name = name @name.deleter def name(self): self._name = None class _XtypeProperty: @property def xtype(self): """Extended data type information.""" return self._xtype @xtype.setter def xtype(self, xtype): if xtype is not None and not self._config.get('version_1_2_or_later'): warn_or_raise( W28, W28, ('xtype', self._element_name, '1.2'), self._config, self._pos) check_string(xtype, 'xtype', self._config, self._pos) self._xtype = xtype @xtype.deleter def xtype(self): self._xtype = None class _UtypeProperty: _utype_in_v1_2 = False @property def utype(self): """The usage-specific or `unique type`_ of the element.""" return self._utype @utype.setter def utype(self, utype): if (self._utype_in_v1_2 and utype is not None and not self._config.get('version_1_2_or_later')): warn_or_raise( W28, W28, ('utype', self._element_name, '1.2'), self._config, self._pos) check_string(utype, 'utype', self._config, self._pos) self._utype = utype @utype.deleter def utype(self): self._utype = None class _UcdProperty: _ucd_in_v1_2 = False @property def ucd(self): """The `unified content descriptor`_ for the element.""" return self._ucd @ucd.setter def ucd(self, ucd): if ucd is not None and ucd.strip() == '': ucd = None if ucd is not None: if (self._ucd_in_v1_2 and not self._config.get('version_1_2_or_later')): warn_or_raise( W28, W28, ('ucd', self._element_name, '1.2'), self._config, self._pos) check_ucd(ucd, self._config, self._pos) self._ucd = ucd @ucd.deleter def ucd(self): self._ucd = None class _DescriptionProperty: @property def description(self): """ An optional string describing the element. Corresponds to the DESCRIPTION_ element. """ return self._description @description.setter def description(self, description): self._description = description @description.deleter def description(self): self._description = None ###################################################################### # ELEMENT CLASSES class Element(metaclass=InheritDocstrings): """ A base class for all classes that represent XML elements in the VOTABLE file. """ _element_name = '' _attr_list = [] def _add_unknown_tag(self, iterator, tag, data, config, pos): warn_or_raise(W10, W10, tag, config, pos) def _ignore_add(self, iterator, tag, data, config, pos): warn_unknown_attrs(tag, data.keys(), config, pos) def _add_definitions(self, iterator, tag, data, config, pos): if config.get('version_1_1_or_later'): warn_or_raise(W22, W22, (), config, pos) warn_unknown_attrs(tag, data.keys(), config, pos) def parse(self, iterator, config): """ For internal use. Parse the XML content of the children of the element. Parameters ---------- iterator : xml iterator An iterator over XML elements as returned by `~astropy.utils.xml.iterparser.get_xml_iterator`. config : dict The configuration dictionary that affects how certain elements are read. Returns ------- self : Element Returns self as a convenience. """ raise NotImplementedError() def to_xml(self, w, **kwargs): """ For internal use. Output the element to XML. Parameters ---------- w : astropy.utils.xml.writer.XMLWriter object An XML writer to write to. kwargs : dict Any configuration parameters to control the output. 
""" raise NotImplementedError() class SimpleElement(Element): """ A base class for simple elements, such as FIELD, PARAM and INFO that don't require any special parsing or outputting machinery. """ def __init__(self): Element.__init__(self) def __repr__(self): buff = io.StringIO() SimpleElement.to_xml(self, XMLWriter(buff)) return buff.getvalue().strip() def parse(self, iterator, config): for start, tag, data, pos in iterator: if start and tag != self._element_name: self._add_unknown_tag(iterator, tag, data, config, pos) elif tag == self._element_name: break return self def to_xml(self, w, **kwargs): w.element(self._element_name, attrib=w.object_attrs(self, self._attr_list)) class SimpleElementWithContent(SimpleElement): """ A base class for simple elements, such as FIELD, PARAM and INFO that don't require any special parsing or outputting machinery. """ def __init__(self): SimpleElement.__init__(self) self._content = None def parse(self, iterator, config): for start, tag, data, pos in iterator: if start and tag != self._element_name: self._add_unknown_tag(iterator, tag, data, config, pos) elif tag == self._element_name: if data: self.content = data break return self def to_xml(self, w, **kwargs): w.element(self._element_name, self._content, attrib=w.object_attrs(self, self._attr_list)) @property def content(self): """The content of the element.""" return self._content @content.setter def content(self, content): check_string(content, 'content', self._config, self._pos) self._content = content @content.deleter def content(self): self._content = None class Link(SimpleElement, _IDProperty): """ LINK_ elements: used to reference external documents and servers through a URI. The keyword arguments correspond to setting members of the same name, documented below. """ _attr_list = ['ID', 'content_role', 'content_type', 'title', 'value', 'href', 'action'] _element_name = 'LINK' def __init__(self, ID=None, title=None, value=None, href=None, action=None, id=None, config=None, pos=None, **kwargs): if config is None: config = {} self._config = config self._pos = pos SimpleElement.__init__(self) content_role = kwargs.get('content-role') or kwargs.get('content_role') content_type = kwargs.get('content-type') or kwargs.get('content_type') if 'gref' in kwargs: warn_or_raise(W11, W11, (), config, pos) self.ID = resolve_id(ID, id, config, pos) self.content_role = content_role self.content_type = content_type self.title = title self.value = value self.href = href self.action = action warn_unknown_attrs( 'LINK', kwargs.keys(), config, pos, ['content-role', 'content_role', 'content-type', 'content_type', 'gref']) @property def content_role(self): """ Defines the MIME role of the referenced object. 
Must be one of: None, 'query', 'hints', 'doc', 'location' or 'type' """ return self._content_role @content_role.setter def content_role(self, content_role): if ((content_role == 'type' and not self._config['version_1_3_or_later']) or content_role not in (None, 'query', 'hints', 'doc', 'location')): vo_warn(W45, (content_role,), self._config, self._pos) self._content_role = content_role @content_role.deleter def content_role(self): self._content_role = None @property def content_type(self): """Defines the MIME content type of the referenced object.""" return self._content_type @content_type.setter def content_type(self, content_type): xmlutil.check_mime_content_type(content_type, self._config, self._pos) self._content_type = content_type @content_type.deleter def content_type(self): self._content_type = None @property def href(self): """ A URI to an arbitrary protocol. The vo package only supports http and anonymous ftp. """ return self._href @href.setter def href(self, href): xmlutil.check_anyuri(href, self._config, self._pos) self._href = href @href.deleter def href(self): self._href = None def to_table_column(self, column): meta = {} for key in self._attr_list: val = getattr(self, key, None) if val is not None: meta[key] = val column.meta.setdefault('links', []) column.meta['links'].append(meta) @classmethod def from_table_column(cls, d): return cls(**d) class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty, _UtypeProperty): """ INFO_ elements: arbitrary key-value pairs for extensions to the standard. The keyword arguments correspond to setting members of the same name, documented below. """ _element_name = 'INFO' _attr_list_11 = ['ID', 'name', 'value'] _attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype'] _utype_in_v1_2 = True def __init__(self, ID=None, name=None, value=None, id=None, xtype=None, ref=None, unit=None, ucd=None, utype=None, config=None, pos=None, **extra): if config is None: config = {} self._config = config self._pos = pos SimpleElementWithContent.__init__(self) self.ID = (resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)) self.name = name self.value = value self.xtype = xtype self.ref = ref self.unit = unit self.ucd = ucd self.utype = utype if config.get('version_1_2_or_later'): self._attr_list = self._attr_list_12 else: self._attr_list = self._attr_list_11 if xtype is not None: warn_unknown_attrs('INFO', ['xtype'], config, pos) if ref is not None: warn_unknown_attrs('INFO', ['ref'], config, pos) if unit is not None: warn_unknown_attrs('INFO', ['unit'], config, pos) if ucd is not None: warn_unknown_attrs('INFO', ['ucd'], config, pos) if utype is not None: warn_unknown_attrs('INFO', ['utype'], config, pos) warn_unknown_attrs('INFO', extra.keys(), config, pos) @property def name(self): """[*required*] The key of the key-value pair.""" return self._name @name.setter def name(self, name): if name is None: warn_or_raise(W35, W35, ('name'), self._config, self._pos) xmlutil.check_token(name, 'name', self._config, self._pos) self._name = name @property def value(self): """ [*required*] The value of the key-value pair. (Always stored as a string or unicode string). 
""" return self._value @value.setter def value(self, value): if value is None: warn_or_raise(W35, W35, ('value'), self._config, self._pos) check_string(value, 'value', self._config, self._pos) self._value = value @property def content(self): """The content inside the INFO element.""" return self._content @content.setter def content(self, content): check_string(content, 'content', self._config, self._pos) self._content = content @content.deleter def content(self): self._content = None @property def ref(self): """ Refer to another INFO_ element by ID_, defined previously in the document. """ return self._ref @ref.setter def ref(self, ref): if ref is not None and not self._config.get('version_1_2_or_later'): warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'), self._config, self._pos) xmlutil.check_id(ref, 'ref', self._config, self._pos) # TODO: actually apply the reference # if ref is not None: # try: # other = self._votable.get_values_by_id(ref, before=self) # except KeyError: # vo_raise( # "VALUES ref='%s', which has not already been defined." % # self.ref, self._config, self._pos, KeyError) # self.null = other.null # self.type = other.type # self.min = other.min # self.min_inclusive = other.min_inclusive # self.max = other.max # self.max_inclusive = other.max_inclusive # self._options[:] = other.options self._ref = ref @ref.deleter def ref(self): self._ref = None @property def unit(self): """A string specifying the units_ for the INFO_.""" return self._unit @unit.setter def unit(self, unit): if unit is None: self._unit = None return from ... import units as u if not self._config.get('version_1_2_or_later'): warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'), self._config, self._pos) # First, parse the unit in the default way, so that we can # still emit a warning if the unit is not to spec. default_format = _get_default_unit_format(self._config) unit_obj = u.Unit( unit, format=default_format, parse_strict='silent') if isinstance(unit_obj, u.UnrecognizedUnit): warn_or_raise(W50, W50, (unit,), self._config, self._pos) format = _get_unit_format(self._config) if format != default_format: unit_obj = u.Unit( unit, format=format, parse_strict='silent') self._unit = unit_obj @unit.deleter def unit(self): self._unit = None def to_xml(self, w, **kwargs): attrib = w.object_attrs(self, self._attr_list) if 'unit' in attrib: attrib['unit'] = self.unit.to_string('cds') w.element(self._element_name, self._content, attrib=attrib) class Values(Element, _IDProperty): """ VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values. The keyword arguments correspond to setting members of the same name, documented below. """ def __init__(self, votable, field, ID=None, null=None, ref=None, type="legal", id=None, config=None, pos=None, **extras): if config is None: config = {} self._config = config self._pos = pos Element.__init__(self) self._votable = votable self._field = field self.ID = resolve_id(ID, id, config, pos) self.null = null self._ref = ref self.type = type self.min = None self.max = None self.min_inclusive = True self.max_inclusive = True self._options = [] warn_unknown_attrs('VALUES', extras.keys(), config, pos) def __repr__(self): buff = io.StringIO() self.to_xml(XMLWriter(buff)) return buff.getvalue().strip() @property def null(self): """ For integral datatypes, *null* is used to define the value used for missing values. 
""" return self._null @null.setter def null(self, null): if null is not None and isinstance(null, str): try: null_val = self._field.converter.parse_scalar( null, self._config, self._pos)[0] except Exception: warn_or_raise(W36, W36, null, self._config, self._pos) null_val = self._field.converter.parse_scalar( '0', self._config, self._pos)[0] else: null_val = null self._null = null_val @null.deleter def null(self): self._null = None @property def type(self): """ [*required*] Defines the applicability of the domain defined by this VALUES_ element. Must be one of the following strings: - 'legal': The domain of this column applies in general to this datatype. (default) - 'actual': The domain of this column applies only to the data enclosed in the parent table. """ return self._type @type.setter def type(self, type): if type not in ('legal', 'actual'): vo_raise(E08, type, self._config, self._pos) self._type = type @property def ref(self): """ Refer to another VALUES_ element by ID_, defined previously in the document, for MIN/MAX/OPTION information. """ return self._ref @ref.setter def ref(self, ref): xmlutil.check_id(ref, 'ref', self._config, self._pos) if ref is not None: try: other = self._votable.get_values_by_id(ref, before=self) except KeyError: warn_or_raise(W43, W43, ('VALUES', self.ref), self._config, self._pos) ref = None else: self.null = other.null self.type = other.type self.min = other.min self.min_inclusive = other.min_inclusive self.max = other.max self.max_inclusive = other.max_inclusive self._options[:] = other.options self._ref = ref @ref.deleter def ref(self): self._ref = None @property def min(self): """ The minimum value of the domain. See :attr:`min_inclusive`. """ return self._min @min.setter def min(self, min): if hasattr(self._field, 'converter') and min is not None: self._min = self._field.converter.parse(min)[0] else: self._min = min @min.deleter def min(self): self._min = None @property def min_inclusive(self): """When `True`, the domain includes the minimum value.""" return self._min_inclusive @min_inclusive.setter def min_inclusive(self, inclusive): if inclusive == 'yes': self._min_inclusive = True elif inclusive == 'no': self._min_inclusive = False else: self._min_inclusive = bool(inclusive) @min_inclusive.deleter def min_inclusive(self): self._min_inclusive = True @property def max(self): """ The maximum value of the domain. See :attr:`max_inclusive`. """ return self._max @max.setter def max(self, max): if hasattr(self._field, 'converter') and max is not None: self._max = self._field.converter.parse(max)[0] else: self._max = max @max.deleter def max(self): self._max = None @property def max_inclusive(self): """When `True`, the domain includes the maximum value.""" return self._max_inclusive @max_inclusive.setter def max_inclusive(self, inclusive): if inclusive == 'yes': self._max_inclusive = True elif inclusive == 'no': self._max_inclusive = False else: self._max_inclusive = bool(inclusive) @max_inclusive.deleter def max_inclusive(self): self._max_inclusive = True @property def options(self): """ A list of string key-value tuples defining other OPTION elements for the domain. All options are ignored -- they are stored for round-tripping purposes only. 
""" return self._options def parse(self, iterator, config): if self.ref is not None: for start, tag, data, pos in iterator: if start: warn_or_raise(W44, W44, tag, config, pos) else: if tag != 'VALUES': warn_or_raise(W44, W44, tag, config, pos) break else: for start, tag, data, pos in iterator: if start: if tag == 'MIN': if 'value' not in data: vo_raise(E09, 'MIN', config, pos) self.min = data['value'] self.min_inclusive = data.get('inclusive', 'yes') warn_unknown_attrs( 'MIN', data.keys(), config, pos, ['value', 'inclusive']) elif tag == 'MAX': if 'value' not in data: vo_raise(E09, 'MAX', config, pos) self.max = data['value'] self.max_inclusive = data.get('inclusive', 'yes') warn_unknown_attrs( 'MAX', data.keys(), config, pos, ['value', 'inclusive']) elif tag == 'OPTION': if 'value' not in data: vo_raise(E09, 'OPTION', config, pos) xmlutil.check_token( data.get('name'), 'name', config, pos) self.options.append( (data.get('name'), data.get('value'))) warn_unknown_attrs( 'OPTION', data.keys(), config, pos, ['data', 'name']) elif tag == 'VALUES': break return self def is_defaults(self): """ Are the settings on this ``VALUE`` element all the same as the XML defaults? """ # If there's nothing meaningful or non-default to write, # don't write anything. return (self.ref is None and self.null is None and self.ID is None and self.max is None and self.min is None and self.options == []) def to_xml(self, w, **kwargs): def yes_no(value): if value: return 'yes' return 'no' if self.is_defaults(): return if self.ref is not None: w.element('VALUES', attrib=w.object_attrs(self, ['ref'])) else: with w.tag('VALUES', attrib=w.object_attrs( self, ['ID', 'null', 'ref'])): if self.min is not None: w.element( 'MIN', value=self._field.converter.output(self.min, False), inclusive=yes_no(self.min_inclusive)) if self.max is not None: w.element( 'MAX', value=self._field.converter.output(self.max, False), inclusive=yes_no(self.max_inclusive)) for name, value in self.options: w.element( 'OPTION', name=name, value=value) def to_table_column(self, column): # Have the ref filled in here meta = {} for key in ['ID', 'null']: val = getattr(self, key, None) if val is not None: meta[key] = val if self.min is not None: meta['min'] = { 'value': self.min, 'inclusive': self.min_inclusive} if self.max is not None: meta['max'] = { 'value': self.max, 'inclusive': self.max_inclusive} if len(self.options): meta['options'] = dict(self.options) column.meta['values'] = meta def from_table_column(self, column): if column.info.meta is None or 'values' not in column.info.meta: return meta = column.info.meta['values'] for key in ['ID', 'null']: val = meta.get(key, None) if val is not None: setattr(self, key, val) if 'min' in meta: self.min = meta['min']['value'] self.min_inclusive = meta['min']['inclusive'] if 'max' in meta: self.max = meta['max']['value'] self.max_inclusive = meta['max']['inclusive'] if 'options' in meta: self._options = list(meta['options'].items()) class Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty, _UtypeProperty, _UcdProperty): """ FIELD_ element: describes the datatype of a particular column of data. The keyword arguments correspond to setting members of the same name, documented below. If *ID* is provided, it is used for the column name in the resulting recarray of the table. If no *ID* is provided, *name* is used instead. If neither is provided, an exception will be raised. 
""" _attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd', 'unit', 'width', 'precision', 'utype', 'ref'] _attr_list_12 = _attr_list_11 + ['xtype'] _element_name = 'FIELD' def __init__(self, votable, ID=None, name=None, datatype=None, arraysize=None, ucd=None, unit=None, width=None, precision=None, utype=None, ref=None, type=None, id=None, xtype=None, config=None, pos=None, **extra): if config is None: config = {} self._config = config self._pos = pos SimpleElement.__init__(self) if config.get('version_1_2_or_later'): self._attr_list = self._attr_list_12 else: self._attr_list = self._attr_list_11 if xtype is not None: warn_unknown_attrs(self._element_name, ['xtype'], config, pos) # TODO: REMOVE ME ---------------------------------------- # This is a terrible hack to support Simple Image Access # Protocol results from archive.noao.edu. It creates a field # for the coordinate projection type of type "double", which # actually contains character data. We have to hack the field # to store character data, or we can't read it in. A warning # will be raised when this happens. if (not config.get('pedantic') and name == 'cprojection' and ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and datatype == 'double'): datatype = 'char' arraysize = '3' vo_warn(W40, (), config, pos) # ---------------------------------------- self.description = None self._votable = votable self.ID = (resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)) self.name = name if name is None: if (self._element_name == 'PARAM' and not config.get('version_1_1_or_later')): pass else: warn_or_raise(W15, W15, self._element_name, config, pos) self.name = self.ID if self._ID is None and name is None: vo_raise(W12, self._element_name, config, pos) datatype_mapping = { 'string': 'char', 'unicodeString': 'unicodeChar', 'int16': 'short', 'int32': 'int', 'int64': 'long', 'float32': 'float', 'float64': 'double', # The following appear in some Vizier tables 'unsignedInt': 'long', 'unsignedShort': 'int' } datatype_mapping.update(config.get('datatype_mapping', {})) if datatype in datatype_mapping: warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]), config, pos) datatype = datatype_mapping[datatype] self.ref = ref self.datatype = datatype self.arraysize = arraysize self.ucd = ucd self.unit = unit self.width = width self.precision = precision self.utype = utype self.type = type self._links = HomogeneousList(Link) self.title = self.name self.values = Values(self._votable, self) self.xtype = xtype self._setup(config, pos) warn_unknown_attrs(self._element_name, extra.keys(), config, pos) @classmethod def uniqify_names(cls, fields): """ Make sure that all names and titles in a list of fields are unique, by appending numbers if necessary. 
""" unique = {} for field in fields: i = 2 new_id = field.ID while new_id in unique: new_id = field.ID + "_{:d}".format(i) i += 1 if new_id != field.ID: vo_warn(W32, (field.ID, new_id), field._config, field._pos) field.ID = new_id unique[new_id] = field.ID for field in fields: i = 2 if field.name is None: new_name = field.ID implicit = True else: new_name = field.name implicit = False if new_name != field.ID: while new_name in unique: new_name = field.name + " {:d}".format(i) i += 1 if (not implicit and new_name != field.name): vo_warn(W33, (field.name, new_name), field._config, field._pos) field._unique_name = new_name unique[new_name] = field.name def _setup(self, config, pos): if self.values._ref is not None: self.values.ref = self.values._ref self.converter = converters.get_converter(self, config, pos) @property def datatype(self): """ [*required*] The datatype of the column. Valid values (as defined by the spec) are: 'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long', 'char', 'unicodeChar', 'float', 'double', 'floatComplex', or 'doubleComplex' Many VOTABLE files in the wild use 'string' instead of 'char', so that is also a valid option, though 'string' will always be converted to 'char' when writing the file back out. """ return self._datatype @datatype.setter def datatype(self, datatype): if datatype is None: if self._config.get('version_1_1_or_later'): warn_or_raise(E10, E10, self._element_name, self._config, self._pos) datatype = 'char' if datatype not in converters.converter_mapping: vo_raise(E06, (datatype, self.ID), self._config, self._pos) self._datatype = datatype @property def precision(self): """ Along with :attr:`width`, defines the `numerical accuracy`_ associated with the data. These values are used to limit the precision when writing floating point values back to the XML file. Otherwise, it is purely informational -- the Numpy recarray containing the data itself does not use this information. """ return self._precision @precision.setter def precision(self, precision): if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision): vo_raise(E11, precision, self._config, self._pos) self._precision = precision @precision.deleter def precision(self): self._precision = None @property def width(self): """ Along with :attr:`precision`, defines the `numerical accuracy`_ associated with the data. These values are used to limit the precision when writing floating point values back to the XML file. Otherwise, it is purely informational -- the Numpy recarray containing the data itself does not use this information. """ return self._width @width.setter def width(self, width): if width is not None: width = int(width) if width <= 0: vo_raise(E12, width, self._config, self._pos) self._width = width @width.deleter def width(self): self._width = None # ref on FIELD and PARAM behave differently than elsewhere -- here # they're just informational, such as to refer to a coordinate # system. @property def ref(self): """ On FIELD_ elements, ref is used only for informational purposes, for example to refer to a COOSYS_ element. """ return self._ref @ref.setter def ref(self, ref): xmlutil.check_id(ref, 'ref', self._config, self._pos) self._ref = ref @ref.deleter def ref(self): self._ref = None @property def unit(self): """A string specifying the units_ for the FIELD_.""" return self._unit @unit.setter def unit(self, unit): if unit is None: self._unit = None return from ... 
import units as u # First, parse the unit in the default way, so that we can # still emit a warning if the unit is not to spec. default_format = _get_default_unit_format(self._config) unit_obj = u.Unit( unit, format=default_format, parse_strict='silent') if isinstance(unit_obj, u.UnrecognizedUnit): warn_or_raise(W50, W50, (unit,), self._config, self._pos) format = _get_unit_format(self._config) if format != default_format: unit_obj = u.Unit( unit, format=format, parse_strict='silent') self._unit = unit_obj @unit.deleter def unit(self): self._unit = None @property def arraysize(self): """ Specifies the size of the multidimensional array if this FIELD_ contains more than a single value. See `multidimensional arrays`_. """ return self._arraysize @arraysize.setter def arraysize(self, arraysize): if (arraysize is not None and not re.match(r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize)): vo_raise(E13, arraysize, self._config, self._pos) self._arraysize = arraysize @arraysize.deleter def arraysize(self): self._arraysize = None @property def type(self): """ The type attribute on FIELD_ elements is reserved for future extensions. """ return self._type @type.setter def type(self, type): self._type = type @type.deleter def type(self): self._type = None @property def values(self): """ A :class:`Values` instance (or `None`) defining the domain of the column. """ return self._values @values.setter def values(self, values): assert values is None or isinstance(values, Values) self._values = values @values.deleter def values(self): self._values = None @property def links(self): """ A list of :class:`Link` instances used to reference more details about the meaning of the FIELD_. This is purely informational and is not used by the `astropy.io.votable` package. """ return self._links def parse(self, iterator, config): for start, tag, data, pos in iterator: if start: if tag == 'VALUES': self.values.__init__( self._votable, self, config=config, pos=pos, **data) self.values.parse(iterator, config) elif tag == 'LINK': link = Link(config=config, pos=pos, **data) self.links.append(link) link.parse(iterator, config) elif tag == 'DESCRIPTION': warn_unknown_attrs( 'DESCRIPTION', data.keys(), config, pos) elif tag != self._element_name: self._add_unknown_tag(iterator, tag, data, config, pos) else: if tag == 'DESCRIPTION': if self.description is not None: warn_or_raise( W17, W17, self._element_name, config, pos) self.description = data or None elif tag == self._element_name: break if self.description is not None: self.title = " ".join(x.strip() for x in self.description.splitlines()) else: self.title = self.name self._setup(config, pos) return self def to_xml(self, w, **kwargs): attrib = w.object_attrs(self, self._attr_list) if 'unit' in attrib: attrib['unit'] = self.unit.to_string('cds') with w.tag(self._element_name, attrib=attrib): if self.description is not None: w.element('DESCRIPTION', self.description, wrap=True) if not self.values.is_defaults(): self.values.to_xml(w, **kwargs) for link in self.links: link.to_xml(w, **kwargs) def to_table_column(self, column): """ Sets the attributes of a given `astropy.table.Column` instance to match the information in this `Field`. 
""" for key in ['ucd', 'width', 'precision', 'utype', 'xtype']: val = getattr(self, key, None) if val is not None: column.meta[key] = val if not self.values.is_defaults(): self.values.to_table_column(column) for link in self.links: link.to_table_column(column) if self.description is not None: column.description = self.description if self.unit is not None: # TODO: Use units framework when it's available column.unit = self.unit if isinstance(self.converter, converters.FloatingPoint): column.format = self.converter.output_format @classmethod def from_table_column(cls, votable, column): """ Restores a `Field` instance from a given `astropy.table.Column` instance. """ kwargs = {} meta = column.info.meta if meta: for key in ['ucd', 'width', 'precision', 'utype', 'xtype']: val = meta.get(key, None) if val is not None: kwargs[key] = val # TODO: Use the unit framework when available if column.info.unit is not None: kwargs['unit'] = column.info.unit kwargs['name'] = column.info.name result = converters.table_column_to_votable_datatype(column) kwargs.update(result) field = cls(votable, **kwargs) if column.info.description is not None: field.description = column.info.description field.values.from_table_column(column) if meta and 'links' in meta: for link in meta['links']: field.links.append(Link.from_table_column(link)) # TODO: Parse format into precision and width return field class Param(Field): """ PARAM_ element: constant-valued columns in the data. :class:`Param` objects are a subclass of :class:`Field`, and have all of its methods and members. Additionally, it defines :attr:`value`. """ _attr_list_11 = Field._attr_list_11 + ['value'] _attr_list_12 = Field._attr_list_12 + ['value'] _element_name = 'PARAM' def __init__(self, votable, ID=None, name=None, value=None, datatype=None, arraysize=None, ucd=None, unit=None, width=None, precision=None, utype=None, type=None, id=None, config=None, pos=None, **extra): self._value = value Field.__init__(self, votable, ID=ID, name=name, datatype=datatype, arraysize=arraysize, ucd=ucd, unit=unit, precision=precision, utype=utype, type=type, id=id, config=config, pos=pos, **extra) @property def value(self): """ [*required*] The constant value of the parameter. Its type is determined by the :attr:`~Field.datatype` member. """ return self._value @value.setter def value(self, value): if value is None: value = "" if isinstance(value, str): self._value = self.converter.parse( value, self._config, self._pos)[0] else: self._value = value def _setup(self, config, pos): Field._setup(self, config, pos) self.value = self._value def to_xml(self, w, **kwargs): tmp_value = self._value self._value = self.converter.output(tmp_value, False) # We must always have a value if self._value is None: self._value = "" Field.to_xml(self, w, **kwargs) self._value = tmp_value class CooSys(SimpleElement): """ COOSYS_ element: defines a coordinate system. The keyword arguments correspond to setting members of the same name, documented below. 
""" _attr_list = ['ID', 'equinox', 'epoch', 'system'] _element_name = 'COOSYS' def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None, config=None, pos=None, **extra): if config is None: config = {} self._config = config self._pos = pos if config.get('version_1_2_or_later'): warn_or_raise(W27, W27, (), config, pos) SimpleElement.__init__(self) self.ID = resolve_id(ID, id, config, pos) self.equinox = equinox self.epoch = epoch self.system = system warn_unknown_attrs('COOSYS', extra.keys(), config, pos) @property def ID(self): """ [*required*] The XML ID of the COOSYS_ element, used for cross-referencing. May be `None` or a string conforming to XML ID_ syntax. """ return self._ID @ID.setter def ID(self, ID): if self._config.get('version_1_1_or_later'): if ID is None: vo_raise(E15, (), self._config, self._pos) xmlutil.check_id(ID, 'ID', self._config, self._pos) self._ID = ID @property def system(self): """ Specifies the type of coordinate system. Valid choices are: 'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic', 'supergalactic', 'xy', 'barycentric', or 'geo_app' """ return self._system @system.setter def system(self, system): if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic', 'supergalactic', 'xy', 'barycentric', 'geo_app'): warn_or_raise(E16, E16, system, self._config, self._pos) self._system = system @system.deleter def system(self): self._system = None @property def equinox(self): """ A parameter required to fix the equatorial or ecliptic systems (as e.g. "J2000" as the default "eq_FK5" or "B1950" as the default "eq_FK4"). """ return self._equinox @equinox.setter def equinox(self, equinox): check_astroyear(equinox, 'equinox', self._config, self._pos) self._equinox = equinox @equinox.deleter def equinox(self): self._equinox = None @property def epoch(self): """ Specifies the epoch of the positions. It must be a string specifying an astronomical year. """ return self._epoch @epoch.setter def epoch(self, epoch): check_astroyear(epoch, 'epoch', self._config, self._pos) self._epoch = epoch @epoch.deleter def epoch(self): self._epoch = None class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty): """ FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements. """ _attr_list_11 = ['ref'] _attr_list_12 = _attr_list_11 + ['ucd', 'utype'] _element_name = "FIELDref" _utype_in_v1_2 = True _ucd_in_v1_2 = True def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None, **extra): """ *table* is the :class:`Table` object that this :class:`FieldRef` is a member of. *ref* is the ID to reference a :class:`Field` object defined elsewhere. """ if config is None: config = {} self._config = config self._pos = pos SimpleElement.__init__(self) self._table = table self.ref = ref self.ucd = ucd self.utype = utype if config.get('version_1_2_or_later'): self._attr_list = self._attr_list_12 else: self._attr_list = self._attr_list_11 if ucd is not None: warn_unknown_attrs(self._element_name, ['ucd'], config, pos) if utype is not None: warn_unknown_attrs(self._element_name, ['utype'], config, pos) @property def ref(self): """The ID_ of the FIELD_ that this FIELDref_ references.""" return self._ref @ref.setter def ref(self, ref): xmlutil.check_id(ref, 'ref', self._config, self._pos) self._ref = ref @ref.deleter def ref(self): self._ref = None def get_ref(self): """ Lookup the :class:`Field` instance that this :class:`FieldRef` references. 
""" for field in self._table._votable.iter_fields_and_params(): if isinstance(field, Field) and field.ID == self.ref: return field vo_raise( "No field named '{}'".format(self.ref), self._config, self._pos, KeyError) class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty): """ PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements. The keyword arguments correspond to setting members of the same name, documented below. It contains the following publicly-accessible members: *ref*: An XML ID referring to a <PARAM> element. """ _attr_list_11 = ['ref'] _attr_list_12 = _attr_list_11 + ['ucd', 'utype'] _element_name = "PARAMref" _utype_in_v1_2 = True _ucd_in_v1_2 = True def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None): if config is None: config = {} self._config = config self._pos = pos Element.__init__(self) self._table = table self.ref = ref self.ucd = ucd self.utype = utype if config.get('version_1_2_or_later'): self._attr_list = self._attr_list_12 else: self._attr_list = self._attr_list_11 if ucd is not None: warn_unknown_attrs(self._element_name, ['ucd'], config, pos) if utype is not None: warn_unknown_attrs(self._element_name, ['utype'], config, pos) @property def ref(self): """The ID_ of the PARAM_ that this PARAMref_ references.""" return self._ref @ref.setter def ref(self, ref): xmlutil.check_id(ref, 'ref', self._config, self._pos) self._ref = ref @ref.deleter def ref(self): self._ref = None def get_ref(self): """ Lookup the :class:`Param` instance that this :class:``PARAMref`` references. """ for param in self._table._votable.iter_fields_and_params(): if isinstance(param, Param) and param.ID == self.ref: return param vo_raise( "No params named '{}'".format(self.ref), self._config, self._pos, KeyError) class Group(Element, _IDProperty, _NameProperty, _UtypeProperty, _UcdProperty, _DescriptionProperty): """ GROUP_ element: groups FIELD_ and PARAM_ elements. This information is currently ignored by the vo package---that is the columns in the recarray are always flat---but the grouping information is stored so that it can be written out again to the XML file. The keyword arguments correspond to setting members of the same name, documented below. """ def __init__(self, table, ID=None, name=None, ref=None, ucd=None, utype=None, id=None, config=None, pos=None, **extra): if config is None: config = {} self._config = config self._pos = pos Element.__init__(self) self._table = table self.ID = (resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)) self.name = name self.ref = ref self.ucd = ucd self.utype = utype self.description = None self._entries = HomogeneousList( (FieldRef, ParamRef, Group, Param)) warn_unknown_attrs('GROUP', extra.keys(), config, pos) def __repr__(self): return '<GROUP>... {0} entries ...</GROUP>'.format(len(self._entries)) @property def ref(self): """ Currently ignored, as it's not clear from the spec how this is meant to work. """ return self._ref @ref.setter def ref(self, ref): xmlutil.check_id(ref, 'ref', self._config, self._pos) self._ref = ref @ref.deleter def ref(self): self._ref = None @property def entries(self): """ [read-only] A list of members of the GROUP_. This list may only contain objects of type :class:`Param`, :class:`Group`, :class:`ParamRef` and :class:`FieldRef`. 
""" return self._entries def _add_fieldref(self, iterator, tag, data, config, pos): fieldref = FieldRef(self._table, config=config, pos=pos, **data) self.entries.append(fieldref) def _add_paramref(self, iterator, tag, data, config, pos): paramref = ParamRef(self._table, config=config, pos=pos, **data) self.entries.append(paramref) def _add_param(self, iterator, tag, data, config, pos): if isinstance(self._table, VOTableFile): votable = self._table else: votable = self._table._votable param = Param(votable, config=config, pos=pos, **data) self.entries.append(param) param.parse(iterator, config) def _add_group(self, iterator, tag, data, config, pos): group = Group(self._table, config=config, pos=pos, **data) self.entries.append(group) group.parse(iterator, config) def parse(self, iterator, config): tag_mapping = { 'FIELDref': self._add_fieldref, 'PARAMref': self._add_paramref, 'PARAM': self._add_param, 'GROUP': self._add_group, 'DESCRIPTION': self._ignore_add} for start, tag, data, pos in iterator: if start: tag_mapping.get(tag, self._add_unknown_tag)( iterator, tag, data, config, pos) else: if tag == 'DESCRIPTION': if self.description is not None: warn_or_raise(W17, W17, 'GROUP', config, pos) self.description = data or None elif tag == 'GROUP': break return self def to_xml(self, w, **kwargs): with w.tag( 'GROUP', attrib=w.object_attrs( self, ['ID', 'name', 'ref', 'ucd', 'utype'])): if self.description is not None: w.element("DESCRIPTION", self.description, wrap=True) for entry in self.entries: entry.to_xml(w, **kwargs) def iter_fields_and_params(self): """ Recursively iterate over all :class:`Param` elements in this :class:`Group`. """ for entry in self.entries: if isinstance(entry, Param): yield entry elif isinstance(entry, Group): for field in entry.iter_fields_and_params(): yield field def iter_groups(self): """ Recursively iterate over all sub-:class:`Group` instances in this :class:`Group`. """ for entry in self.entries: if isinstance(entry, Group): yield entry for group in entry.iter_groups(): yield group class Table(Element, _IDProperty, _NameProperty, _UcdProperty, _DescriptionProperty): """ TABLE_ element: optionally contains data. It contains the following publicly-accessible and mutable attribute: *array*: A Numpy masked array of the data itself, where each row is a row of votable data, and columns are named and typed based on the <FIELD> elements of the table. The mask is parallel to the data array, except for variable-length fields. For those fields, the numpy array's column type is "object" (``"O"``), and another masked array is stored there. If the Table contains no data, (for example, its enclosing :class:`Resource` has :attr:`~Resource.type` == 'meta') *array* will have zero-length. The keyword arguments correspond to setting members of the same name, documented below. 
""" def __init__(self, votable, ID=None, name=None, ref=None, ucd=None, utype=None, nrows=None, id=None, config=None, pos=None, **extra): if config is None: config = {} self._config = config self._pos = pos self._empty = False Element.__init__(self) self._votable = votable self.ID = (resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)) self.name = name xmlutil.check_id(ref, 'ref', config, pos) self._ref = ref self.ucd = ucd self.utype = utype if nrows is not None: nrows = int(nrows) if nrows < 0: raise ValueError("'nrows' cannot be negative.") self._nrows = nrows self.description = None self.format = 'tabledata' self._fields = HomogeneousList(Field) self._params = HomogeneousList(Param) self._groups = HomogeneousList(Group) self._links = HomogeneousList(Link) self._infos = HomogeneousList(Info) self.array = ma.array([]) warn_unknown_attrs('TABLE', extra.keys(), config, pos) def __repr__(self): return repr(self.to_table()) def __bytes__(self): return bytes(self.to_table()) def __str__(self): return str(self.to_table()) @property def ref(self): return self._ref @ref.setter def ref(self, ref): """ Refer to another TABLE, previously defined, by the *ref* ID_ for all metadata (FIELD_, PARAM_ etc.) information. """ # When the ref changes, we want to verify that it will work # by actually going and looking for the referenced table. # If found, set a bunch of properties in this table based # on the other one. xmlutil.check_id(ref, 'ref', self._config, self._pos) if ref is not None: try: table = self._votable.get_table_by_id(ref, before=self) except KeyError: warn_or_raise( W43, W43, ('TABLE', self.ref), self._config, self._pos) ref = None else: self._fields = table.fields self._params = table.params self._groups = table.groups self._links = table.links else: del self._fields[:] del self._params[:] del self._groups[:] del self._links[:] self._ref = ref @ref.deleter def ref(self): self._ref = None @property def format(self): """ [*required*] The serialization format of the table. Must be one of: 'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_) 'fits' (FITS_). Note that the 'fits' format, since it requires an external file, can not be written out. Any file read in with 'fits' format will be read out, by default, in 'tabledata' format. See :ref:`votable-serialization`. """ return self._format @format.setter def format(self, format): format = format.lower() if format == 'fits': vo_raise("fits format can not be written out, only read.", self._config, self._pos, NotImplementedError) if format == 'binary2': if not self._config['version_1_3_or_later']: vo_raise( "binary2 only supported in votable 1.3 or later", self._config, self._pos) elif format not in ('tabledata', 'binary'): vo_raise("Invalid format '{}'".format(format), self._config, self._pos) self._format = format @property def nrows(self): """ [*immutable*] The number of rows in the table, as specified in the XML file. """ return self._nrows @property def fields(self): """ A list of :class:`Field` objects describing the types of each of the data columns. """ return self._fields @property def params(self): """ A list of parameters (constant-valued columns) for the table. Must contain only :class:`Param` objects. """ return self._params @property def groups(self): """ A list of :class:`Group` objects describing how the columns and parameters are grouped. Currently this information is only kept around for round-tripping and informational purposes. 
""" return self._groups @property def links(self): """ A list of :class:`Link` objects (pointers to other documents or servers through a URI) for the table. """ return self._links @property def infos(self): """ A list of :class:`Info` objects for the table. Allows for post-operational diagnostics. """ return self._infos def is_empty(self): """ Returns True if this table doesn't contain any real data because it was skipped over by the parser (through use of the ``table_number`` kwarg). """ return self._empty def create_arrays(self, nrows=0, config=None): """ Create a new array to hold the data based on the current set of fields, and store them in the *array* and member variable. Any data in the existing array will be lost. *nrows*, if provided, is the number of rows to allocate. """ if nrows is None: nrows = 0 fields = self.fields if len(fields) == 0: array = np.recarray((nrows,), dtype='O') mask = np.zeros((nrows,), dtype='b') else: # for field in fields: field._setup(config) Field.uniqify_names(fields) dtype = [] for x in fields: if x._unique_name == x.ID: id = x.ID else: id = (x._unique_name, x.ID) dtype.append((id, x.converter.format)) array = np.recarray((nrows,), dtype=np.dtype(dtype)) descr_mask = [] for d in array.dtype.descr: new_type = (d[1][1] == 'O' and 'O') or 'bool' if len(d) == 2: descr_mask.append((d[0], new_type)) elif len(d) == 3: descr_mask.append((d[0], new_type, d[2])) mask = np.zeros((nrows,), dtype=descr_mask) self.array = ma.array(array, mask=mask) def _resize_strategy(self, size): """ Return a new (larger) size based on size, used for reallocating an array when it fills up. This is in its own function so the resizing strategy can be easily replaced. """ # Once we go beyond 0, make a big step -- after that use a # factor of 1.5 to help keep memory usage compact if size == 0: return 512 return int(np.ceil(size * RESIZE_AMOUNT)) def _add_field(self, iterator, tag, data, config, pos): field = Field(self._votable, config=config, pos=pos, **data) self.fields.append(field) field.parse(iterator, config) def _add_param(self, iterator, tag, data, config, pos): param = Param(self._votable, config=config, pos=pos, **data) self.params.append(param) param.parse(iterator, config) def _add_group(self, iterator, tag, data, config, pos): group = Group(self, config=config, pos=pos, **data) self.groups.append(group) group.parse(iterator, config) def _add_link(self, iterator, tag, data, config, pos): link = Link(config=config, pos=pos, **data) self.links.append(link) link.parse(iterator, config) def _add_info(self, iterator, tag, data, config, pos): if not config.get('version_1_2_or_later'): warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos) info = Info(config=config, pos=pos, **data) self.infos.append(info) info.parse(iterator, config) def parse(self, iterator, config): columns = config.get('columns') # If we've requested to read in only a specific table, skip # all others table_number = config.get('table_number') current_table_number = config.get('_current_table_number') skip_table = False if current_table_number is not None: config['_current_table_number'] += 1 if (table_number is not None and table_number != current_table_number): skip_table = True self._empty = True table_id = config.get('table_id') if table_id is not None: if table_id != self.ID: skip_table = True self._empty = True if self.ref is not None: # This table doesn't have its own datatype descriptors, it # just references those from another table. 
# This is to call the property setter to go and get the # referenced information self.ref = self.ref for start, tag, data, pos in iterator: if start: if tag == 'DATA': warn_unknown_attrs( 'DATA', data.keys(), config, pos) break else: if tag == 'TABLE': return self elif tag == 'DESCRIPTION': if self.description is not None: warn_or_raise(W17, W17, 'RESOURCE', config, pos) self.description = data or None else: tag_mapping = { 'FIELD': self._add_field, 'PARAM': self._add_param, 'GROUP': self._add_group, 'LINK': self._add_link, 'INFO': self._add_info, 'DESCRIPTION': self._ignore_add} for start, tag, data, pos in iterator: if start: if tag == 'DATA': warn_unknown_attrs( 'DATA', data.keys(), config, pos) break tag_mapping.get(tag, self._add_unknown_tag)( iterator, tag, data, config, pos) else: if tag == 'DESCRIPTION': if self.description is not None: warn_or_raise(W17, W17, 'RESOURCE', config, pos) self.description = data or None elif tag == 'TABLE': # For error checking purposes Field.uniqify_names(self.fields) # We still need to create arrays, even if the file # contains no DATA section self.create_arrays(nrows=0, config=config) return self self.create_arrays(nrows=self._nrows, config=config) fields = self.fields names = [x.ID for x in fields] # Deal with a subset of the columns, if requested. if not columns: colnumbers = list(range(len(fields))) else: if isinstance(columns, str): columns = [columns] columns = np.asarray(columns) if issubclass(columns.dtype.type, np.integer): if np.any(columns < 0) or np.any(columns > len(fields)): raise ValueError( "Some specified column numbers out of range") colnumbers = columns elif issubclass(columns.dtype.type, np.character): try: colnumbers = [names.index(x) for x in columns] except ValueError: raise ValueError( "Columns '{}' not found in fields list".format(columns)) else: raise TypeError("Invalid columns list") if not skip_table: for start, tag, data, pos in iterator: if start: if tag == 'TABLEDATA': warn_unknown_attrs( 'TABLEDATA', data.keys(), config, pos) self.array = self._parse_tabledata( iterator, colnumbers, fields, config) break elif tag == 'BINARY': warn_unknown_attrs( 'BINARY', data.keys(), config, pos) self.array = self._parse_binary( 1, iterator, colnumbers, fields, config, pos) break elif tag == 'BINARY2': if not config['version_1_3_or_later']: warn_or_raise( W52, W52, config['version'], config, pos) self.array = self._parse_binary( 2, iterator, colnumbers, fields, config, pos) break elif tag == 'FITS': warn_unknown_attrs( 'FITS', data.keys(), config, pos, ['extnum']) try: extnum = int(data.get('extnum', 0)) if extnum < 0: raise ValueError("'extnum' cannot be negative.") except ValueError: vo_raise(E17, (), config, pos) self.array = self._parse_fits( iterator, extnum, config) break else: warn_or_raise(W37, W37, tag, config, pos) break for start, tag, data, pos in iterator: if not start and tag == 'DATA': break for start, tag, data, pos in iterator: if start and tag == 'INFO': if not config.get('version_1_2_or_later'): warn_or_raise( W26, W26, ('INFO', 'TABLE', '1.2'), config, pos) info = Info(config=config, pos=pos, **data) self.infos.append(info) info.parse(iterator, config) elif not start and tag == 'TABLE': break return self def _parse_tabledata(self, iterator, colnumbers, fields, config): # Since we don't know the number of rows up front, we'll # reallocate the record array to make room as we go. This # prevents the need to scan through the XML twice. The # allocation is by factors of 1.5. 
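        # Rows are buffered in plain Python lists of up to ``chunk_size`` rows
        # (``array_chunk``/``mask_chunk``) and copied into the masked record
        # array in bulk; the array itself is grown geometrically through
        # _resize_strategy as needed.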
invalid = config.get('invalid', 'exception') # Need to have only one reference so that we can resize the # array array = self.array del self.array parsers = [field.converter.parse for field in fields] binparsers = [field.converter.binparse for field in fields] numrows = 0 alloc_rows = len(array) colnumbers_bits = [i in colnumbers for i in range(len(fields))] row_default = [x.converter.default for x in fields] mask_default = [True] * len(fields) array_chunk = [] mask_chunk = [] chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE) for start, tag, data, pos in iterator: if tag == 'TR': # Now parse one row row = row_default[:] row_mask = mask_default[:] i = 0 for start, tag, data, pos in iterator: if start: binary = (data.get('encoding', None) == 'base64') warn_unknown_attrs( tag, data.keys(), config, pos, ['encoding']) else: if tag == 'TD': if i >= len(fields): vo_raise(E20, len(fields), config, pos) if colnumbers_bits[i]: try: if binary: rawdata = base64.b64decode( data.encode('ascii')) buf = io.BytesIO(rawdata) buf.seek(0) try: value, mask_value = binparsers[i]( buf.read) except Exception as e: vo_reraise( e, config, pos, "(in row {:d}, col '{}')".format( len(array_chunk), fields[i].ID)) else: try: value, mask_value = parsers[i]( data, config, pos) except Exception as e: vo_reraise( e, config, pos, "(in row {:d}, col '{}')".format( len(array_chunk), fields[i].ID)) except Exception as e: if invalid == 'exception': vo_reraise(e, config, pos) else: row[i] = value row_mask[i] = mask_value elif tag == 'TR': break else: self._add_unknown_tag( iterator, tag, data, config, pos) i += 1 if i < len(fields): vo_raise(E21, (i, len(fields)), config, pos) array_chunk.append(tuple(row)) mask_chunk.append(tuple(row_mask)) if len(array_chunk) == chunk_size: while numrows + chunk_size > alloc_rows: alloc_rows = self._resize_strategy(alloc_rows) if alloc_rows != len(array): array = _resize(array, alloc_rows) array[numrows:numrows + chunk_size] = array_chunk array.mask[numrows:numrows + chunk_size] = mask_chunk numrows += chunk_size array_chunk = [] mask_chunk = [] elif not start and tag == 'TABLEDATA': break # Now, resize the array to the exact number of rows we need and # put the last chunk values in there. 
alloc_rows = numrows + len(array_chunk) array = _resize(array, alloc_rows) array[numrows:] = array_chunk if alloc_rows != 0: array.mask[numrows:] = mask_chunk numrows += len(array_chunk) if (self.nrows is not None and self.nrows >= 0 and self.nrows != numrows): warn_or_raise(W18, W18, (self.nrows, numrows), config, pos) self._nrows = numrows return array def _get_binary_data_stream(self, iterator, config): have_local_stream = False for start, tag, data, pos in iterator: if tag == 'STREAM': if start: warn_unknown_attrs( 'STREAM', data.keys(), config, pos, ['type', 'href', 'actuate', 'encoding', 'expires', 'rights']) if 'href' not in data: have_local_stream = True if data.get('encoding', None) != 'base64': warn_or_raise( W38, W38, data.get('encoding', None), config, pos) else: href = data['href'] xmlutil.check_anyuri(href, config, pos) encoding = data.get('encoding', None) else: buffer = data break if have_local_stream: buffer = base64.b64decode(buffer.encode('ascii')) string_io = io.BytesIO(buffer) string_io.seek(0) read = string_io.read else: if not href.startswith(('http', 'ftp', 'file')): vo_raise( "The vo package only supports remote data through http, " + "ftp or file", self._config, self._pos, NotImplementedError) fd = urllib.request.urlopen(href) if encoding is not None: if encoding == 'gzip': fd = gzip.GzipFile(href, 'rb', fileobj=fd) elif encoding == 'base64': fd = codecs.EncodedFile(fd, 'base64') else: vo_raise( "Unknown encoding type '{}'".format(encoding), self._config, self._pos, NotImplementedError) read = fd.read def careful_read(length): result = read(length) if len(result) != length: raise EOFError return result return careful_read def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos): fields = self.fields careful_read = self._get_binary_data_stream(iterator, config) # Need to have only one reference so that we can resize the # array array = self.array del self.array binparsers = [field.converter.binparse for field in fields] numrows = 0 alloc_rows = len(array) while True: # Resize result arrays if necessary if numrows >= alloc_rows: alloc_rows = self._resize_strategy(alloc_rows) array = _resize(array, alloc_rows) row_data = [] row_mask_data = [] try: if mode == 2: mask_bits = careful_read(int((len(fields) + 7) / 8)) row_mask_data = list(converters.bitarray_to_bool( mask_bits, len(fields))) for i, binparse in enumerate(binparsers): try: value, value_mask = binparse(careful_read) except EOFError: raise except Exception as e: vo_reraise( e, config, pos, "(in row {:d}, col '{}')".format( numrows, fields[i].ID)) row_data.append(value) if mode == 1: row_mask_data.append(value_mask) else: row_mask_data[i] = row_mask_data[i] or value_mask except EOFError: break row = [x.converter.default for x in fields] row_mask = [False] * len(fields) for i in colnumbers: row[i] = row_data[i] row_mask[i] = row_mask_data[i] array[numrows] = tuple(row) array.mask[numrows] = tuple(row_mask) numrows += 1 array = _resize(array, numrows) return array def _parse_fits(self, iterator, extnum, config): for start, tag, data, pos in iterator: if tag == 'STREAM': if start: warn_unknown_attrs( 'STREAM', data.keys(), config, pos, ['type', 'href', 'actuate', 'encoding', 'expires', 'rights']) href = data['href'] encoding = data.get('encoding', None) else: break if not href.startswith(('http', 'ftp', 'file')): vo_raise( "The vo package only supports remote data through http, " "ftp or file", self._config, self._pos, NotImplementedError) fd = urllib.request.urlopen(href) if encoding is not None: 
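            # The remote stream may itself be gzip- or base64-encoded; wrap it
            # here so that fits.open() below receives decoded bytes.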
if encoding == 'gzip': fd = gzip.GzipFile(href, 'r', fileobj=fd) elif encoding == 'base64': fd = codecs.EncodedFile(fd, 'base64') else: vo_raise( "Unknown encoding type '{}'".format(encoding), self._config, self._pos, NotImplementedError) hdulist = fits.open(fd) array = hdulist[int(extnum)].data if array.dtype != self.array.dtype: warn_or_raise(W19, W19, (), self._config, self._pos) return array def to_xml(self, w, **kwargs): specified_format = kwargs.get('tabledata_format') if specified_format is not None: format = specified_format else: format = self.format if format == 'fits': format = 'tabledata' with w.tag( 'TABLE', attrib=w.object_attrs( self, ('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))): if self.description is not None: w.element("DESCRIPTION", self.description, wrap=True) for element_set in (self.fields, self.params): for element in element_set: element._setup({}, None) if self.ref is None: for element_set in (self.fields, self.params, self.groups, self.links): for element in element_set: element.to_xml(w, **kwargs) elif kwargs['version_1_2_or_later']: index = list(self._votable.iter_tables()).index(self) group = Group(self, ID="_g{0}".format(index)) group.to_xml(w, **kwargs) if len(self.array): with w.tag('DATA'): if format == 'tabledata': self._write_tabledata(w, **kwargs) elif format == 'binary': self._write_binary(1, w, **kwargs) elif format == 'binary2': self._write_binary(2, w, **kwargs) if kwargs['version_1_2_or_later']: for element in self._infos: element.to_xml(w, **kwargs) def _write_tabledata(self, w, **kwargs): fields = self.fields array = self.array with w.tag('TABLEDATA'): w._flush() if (_has_c_tabledata_writer and not kwargs.get('_debug_python_based_parser')): supports_empty_values = [ field.converter.supports_empty_values(kwargs) for field in fields] fields = [field.converter.output for field in fields] indent = len(w._tags) - 1 tablewriter.write_tabledata( w.write, array.data, array.mask, fields, supports_empty_values, indent, 1 << 8) else: write = w.write indent_spaces = w.get_indentation_spaces() tr_start = indent_spaces + "<TR>\n" tr_end = indent_spaces + "</TR>\n" td = indent_spaces + " <TD>{}</TD>\n" td_empty = indent_spaces + " <TD/>\n" fields = [(i, field.converter.output, field.converter.supports_empty_values(kwargs)) for i, field in enumerate(fields)] for row in range(len(array)): write(tr_start) array_row = array.data[row] mask_row = array.mask[row] for i, output, supports_empty_values in fields: data = array_row[i] masked = mask_row[i] if supports_empty_values and np.all(masked): write(td_empty) else: try: val = output(data, masked) except Exception as e: vo_reraise( e, additional="(in row {:d}, col '{}')".format( row, self.fields[i].ID)) if len(val): write(td.format(val)) else: write(td_empty) write(tr_end) def _write_binary(self, mode, w, **kwargs): fields = self.fields array = self.array if mode == 1: tag_name = 'BINARY' else: tag_name = 'BINARY2' with w.tag(tag_name): with w.tag('STREAM', encoding='base64'): fields_basic = [(i, field.converter.binoutput) for (i, field) in enumerate(fields)] data = io.BytesIO() for row in range(len(array)): array_row = array.data[row] array_mask = array.mask[row] if mode == 2: flattened = np.array([np.all(x) for x in array_mask]) data.write(converters.bool_to_bitarray(flattened)) for i, converter in fields_basic: try: chunk = converter(array_row[i], array_mask[i]) assert type(chunk) == bytes except Exception as e: vo_reraise( e, additional="(in row {:d}, col '{}')".format( row, fields[i].ID)) data.write(chunk) 
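            # All rows have now been serialized into the in-memory buffer;
            # flush any pending XML and emit the payload as a single
            # base64-encoded text block inside <STREAM>.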
w._flush() w.write(base64.b64encode(data.getvalue()).decode('ascii')) def to_table(self, use_names_over_ids=False): """ Convert this VO Table to an `astropy.table.Table` instance. Parameters ---------- use_names_over_ids : bool, optional When `True` use the ``name`` attributes of columns as the names of columns in the `astropy.table.Table` instance. Since names are not guaranteed to be unique, this may cause some columns to be renamed by appending numbers to the end. Otherwise (default), use the ID attributes as the column names. .. warning:: Variable-length array fields may not be restored identically when round-tripping through the `astropy.table.Table` instance. """ from ...table import Table meta = {} for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']: val = getattr(self, key, None) if val is not None: meta[key] = val if use_names_over_ids: names = [field.name for field in self.fields] unique_names = [] for i, name in enumerate(names): new_name = name i = 2 while new_name in unique_names: new_name = '{0}{1}'.format(name, i) i += 1 unique_names.append(new_name) array = self.array.copy() array.dtype.names = unique_names names = unique_names else: array = self.array names = [field.ID for field in self.fields] table = Table(self.array, meta=meta) for name, field in zip(names, self.fields): column = table[name] field.to_table_column(column) return table @classmethod def from_table(cls, votable, table): """ Create a `Table` instance from a given `astropy.table.Table` instance. """ kwargs = {} for key in ['ID', 'name', 'ref', 'ucd', 'utype']: val = table.meta.get(key) if val is not None: kwargs[key] = val new_table = cls(votable, **kwargs) if 'description' in table.meta: new_table.description = table.meta['description'] for colname in table.colnames: column = table[colname] new_table.fields.append(Field.from_table_column(votable, column)) if table.mask is None: new_table.array = ma.array(np.asarray(table)) else: new_table.array = ma.array(np.asarray(table), mask=np.asarray(table.mask)) return new_table def iter_fields_and_params(self): """ Recursively iterate over all FIELD and PARAM elements in the TABLE. """ for param in self.params: yield param for field in self.fields: yield field for group in self.groups: for field in group.iter_fields_and_params(): yield field get_field_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_fields_and_params', 'FIELD or PARAM', """ Looks up a FIELD or PARAM element by the given ID. """) get_field_by_id_or_name = _lookup_by_id_or_name_factory( 'iter_fields_and_params', 'FIELD or PARAM', """ Looks up a FIELD or PARAM element by the given ID or name. """) get_fields_by_utype = _lookup_by_attr_factory( 'utype', False, 'iter_fields_and_params', 'FIELD or PARAM', """ Looks up a FIELD or PARAM element by the given utype and returns an iterator emitting all matches. """) def iter_groups(self): """ Recursively iterate over all GROUP elements in the TABLE. """ for group in self.groups: yield group for g in group.iter_groups(): yield g get_group_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_groups', 'GROUP', """ Looks up a GROUP element by the given ID. Used by the group's "ref" attribute """) get_groups_by_utype = _lookup_by_attr_factory( 'utype', False, 'iter_groups', 'GROUP', """ Looks up a GROUP element by the given utype and returns an iterator emitting all matches. 
""") def iter_info(self): for info in self.infos: yield info class Resource(Element, _IDProperty, _NameProperty, _UtypeProperty, _DescriptionProperty): """ RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements. The keyword arguments correspond to setting members of the same name, documented below. """ def __init__(self, name=None, ID=None, utype=None, type='results', id=None, config=None, pos=None, **kwargs): if config is None: config = {} self._config = config self._pos = pos Element.__init__(self) self.name = name self.ID = resolve_id(ID, id, config, pos) self.utype = utype self.type = type self._extra_attributes = kwargs self.description = None self._coordinate_systems = HomogeneousList(CooSys) self._groups = HomogeneousList(Group) self._params = HomogeneousList(Param) self._infos = HomogeneousList(Info) self._links = HomogeneousList(Link) self._tables = HomogeneousList(Table) self._resources = HomogeneousList(Resource) warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos) def __repr__(self): buff = io.StringIO() w = XMLWriter(buff) w.element( self._element_name, attrib=w.object_attrs(self, self._attr_list)) return buff.getvalue().strip() @property def type(self): """ [*required*] The type of the resource. Must be either: - 'results': This resource contains actual result values (default) - 'meta': This resource contains only datatype descriptions (FIELD_ elements), but no actual data. """ return self._type @type.setter def type(self, type): if type not in ('results', 'meta'): vo_raise(E18, type, self._config, self._pos) self._type = type @property def extra_attributes(self): """ A dictionary of string keys to string values containing any extra attributes of the RESOURCE_ element that are not defined in the specification. (The specification explicitly allows for extra attributes here, but nowhere else.) """ return self._extra_attributes @property def coordinate_systems(self): """ A list of coordinate system definitions (COOSYS_ elements) for the RESOURCE_. Must contain only `CooSys` objects. """ return self._coordinate_systems @property def infos(self): """ A list of informational parameters (key-value pairs) for the resource. Must only contain `Info` objects. """ return self._infos @property def groups(self): """ A list of groups """ return self._groups @property def params(self): """ A list of parameters (constant-valued columns) for the resource. Must contain only `Param` objects. """ return self._params @property def links(self): """ A list of links (pointers to other documents or servers through a URI) for the resource. Must contain only `Link` objects. """ return self._links @property def tables(self): """ A list of tables in the resource. Must contain only `Table` objects. """ return self._tables @property def resources(self): """ A list of nested resources inside this resource. Must contain only `Resource` objects. 
""" return self._resources def _add_table(self, iterator, tag, data, config, pos): table = Table(self._votable, config=config, pos=pos, **data) self.tables.append(table) table.parse(iterator, config) def _add_info(self, iterator, tag, data, config, pos): info = Info(config=config, pos=pos, **data) self.infos.append(info) info.parse(iterator, config) def _add_group(self, iterator, tag, data, config, pos): group = Group(self, config=config, pos=pos, **data) self.groups.append(group) group.parse(iterator, config) def _add_param(self, iterator, tag, data, config, pos): param = Param(self._votable, config=config, pos=pos, **data) self.params.append(param) param.parse(iterator, config) def _add_coosys(self, iterator, tag, data, config, pos): coosys = CooSys(config=config, pos=pos, **data) self.coordinate_systems.append(coosys) coosys.parse(iterator, config) def _add_resource(self, iterator, tag, data, config, pos): resource = Resource(config=config, pos=pos, **data) self.resources.append(resource) resource.parse(self._votable, iterator, config) def _add_link(self, iterator, tag, data, config, pos): link = Link(config=config, pos=pos, **data) self.links.append(link) link.parse(iterator, config) def parse(self, votable, iterator, config): self._votable = votable tag_mapping = { 'TABLE': self._add_table, 'INFO': self._add_info, 'PARAM': self._add_param, 'GROUP' : self._add_group, 'COOSYS': self._add_coosys, 'RESOURCE': self._add_resource, 'LINK': self._add_link, 'DESCRIPTION': self._ignore_add } for start, tag, data, pos in iterator: if start: tag_mapping.get(tag, self._add_unknown_tag)( iterator, tag, data, config, pos) elif tag == 'DESCRIPTION': if self.description is not None: warn_or_raise(W17, W17, 'RESOURCE', config, pos) self.description = data or None elif tag == 'RESOURCE': break del self._votable return self def to_xml(self, w, **kwargs): attrs = w.object_attrs(self, ('ID', 'type', 'utype')) attrs.update(self.extra_attributes) with w.tag('RESOURCE', attrib=attrs): if self.description is not None: w.element("DESCRIPTION", self.description, wrap=True) for element_set in (self.coordinate_systems, self.params, self.infos, self.links, self.tables, self.resources): for element in element_set: element.to_xml(w, **kwargs) def iter_tables(self): """ Recursively iterates over all tables in the resource and nested resources. """ for table in self.tables: yield table for resource in self.resources: for table in resource.iter_tables(): yield table def iter_fields_and_params(self): """ Recursively iterates over all FIELD_ and PARAM_ elements in the resource, its tables and nested resources. """ for param in self.params: yield param for table in self.tables: for param in table.iter_fields_and_params(): yield param for resource in self.resources: for param in resource.iter_fields_and_params(): yield param def iter_coosys(self): """ Recursively iterates over all the COOSYS_ elements in the resource and nested resources. """ for coosys in self.coordinate_systems: yield coosys for resource in self.resources: for coosys in resource.iter_coosys(): yield coosys def iter_info(self): """ Recursively iterates over all the INFO_ elements in the resource and nested resources. """ for info in self.infos: yield info for table in self.tables: for info in table.iter_info(): yield info for resource in self.resources: for info in resource.iter_info(): yield info class VOTableFile(Element, _IDProperty, _DescriptionProperty): """ VOTABLE_ element: represents an entire file. 
The keyword arguments correspond to setting members of the same name, documented below. *version* is settable at construction time only, since conformance tests for building the rest of the structure depend on it. """ def __init__(self, ID=None, id=None, config=None, pos=None, version="1.3"): if config is None: config = {} self._config = config self._pos = pos Element.__init__(self) self.ID = resolve_id(ID, id, config, pos) self.description = None self._coordinate_systems = HomogeneousList(CooSys) self._params = HomogeneousList(Param) self._infos = HomogeneousList(Info) self._resources = HomogeneousList(Resource) self._groups = HomogeneousList(Group) version = str(version) if version not in ("1.0", "1.1", "1.2", "1.3"): raise ValueError("'version' should be one of '1.0', '1.1', " "'1.2', or '1.3'") self._version = version def __repr__(self): n_tables = len(list(self.iter_tables())) return '<VOTABLE>... {0} tables ...</VOTABLE>'.format(n_tables) @property def version(self): """ The version of the VOTable specification that the file uses. """ return self._version @version.setter def version(self, version): version = str(version) if version not in ('1.1', '1.2', '1.3'): raise ValueError( "astropy.io.votable only supports VOTable versions " "1.1, 1.2 and 1.3") self._version = version @property def coordinate_systems(self): """ A list of coordinate system descriptions for the file. Must contain only `CooSys` objects. """ return self._coordinate_systems @property def params(self): """ A list of parameters (constant-valued columns) that apply to the entire file. Must contain only `Param` objects. """ return self._params @property def infos(self): """ A list of informational parameters (key-value pairs) for the entire file. Must only contain `Info` objects. """ return self._infos @property def resources(self): """ A list of resources, in the order they appear in the file. Must only contain `Resource` objects. """ return self._resources @property def groups(self): """ A list of groups, in the order they appear in the file. Only supported as a child of the VOTABLE element in VOTable 1.2 or later. 
""" return self._groups def _add_param(self, iterator, tag, data, config, pos): param = Param(self, config=config, pos=pos, **data) self.params.append(param) param.parse(iterator, config) def _add_resource(self, iterator, tag, data, config, pos): resource = Resource(config=config, pos=pos, **data) self.resources.append(resource) resource.parse(self, iterator, config) def _add_coosys(self, iterator, tag, data, config, pos): coosys = CooSys(config=config, pos=pos, **data) self.coordinate_systems.append(coosys) coosys.parse(iterator, config) def _add_info(self, iterator, tag, data, config, pos): info = Info(config=config, pos=pos, **data) self.infos.append(info) info.parse(iterator, config) def _add_group(self, iterator, tag, data, config, pos): if not config.get('version_1_2_or_later'): warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos) group = Group(self, config=config, pos=pos, **data) self.groups.append(group) group.parse(iterator, config) def parse(self, iterator, config): config['_current_table_number'] = 0 for start, tag, data, pos in iterator: if start: if tag == 'xml': pass elif tag == 'VOTABLE': if 'version' not in data: warn_or_raise(W20, W20, self.version, config, pos) config['version'] = self.version else: config['version'] = self._version = data['version'] if config['version'].lower().startswith('v'): warn_or_raise( W29, W29, config['version'], config, pos) self._version = config['version'] = \ config['version'][1:] if config['version'] not in ('1.1', '1.2', '1.3'): vo_warn(W21, config['version'], config, pos) if 'xmlns' in data: correct_ns = ('http://www.ivoa.net/xml/VOTable/v{}'.format( config['version'])) if data['xmlns'] != correct_ns: vo_warn( W41, (correct_ns, data['xmlns']), config, pos) else: vo_warn(W42, (), config, pos) break else: vo_raise(E19, (), config, pos) config['version_1_1_or_later'] = \ util.version_compare(config['version'], '1.1') >= 0 config['version_1_2_or_later'] = \ util.version_compare(config['version'], '1.2') >= 0 config['version_1_3_or_later'] = \ util.version_compare(config['version'], '1.3') >= 0 tag_mapping = { 'PARAM': self._add_param, 'RESOURCE': self._add_resource, 'COOSYS': self._add_coosys, 'INFO': self._add_info, 'DEFINITIONS': self._add_definitions, 'DESCRIPTION': self._ignore_add, 'GROUP': self._add_group} for start, tag, data, pos in iterator: if start: tag_mapping.get(tag, self._add_unknown_tag)( iterator, tag, data, config, pos) elif tag == 'DESCRIPTION': if self.description is not None: warn_or_raise(W17, W17, 'VOTABLE', config, pos) self.description = data or None if not len(self.resources) and config['version_1_2_or_later']: warn_or_raise(W53, W53, (), config, pos) return self def to_xml(self, fd, compressed=False, tabledata_format=None, _debug_python_based_parser=False, _astropy_version=None): """ Write to an XML file. Parameters ---------- fd : str path or writable file-like object Where to write the file. compressed : bool, optional When `True`, write to a gzip-compressed file. (Default: `False`) tabledata_format : str, optional Override the format of the table(s) data to write. Must be one of ``tabledata`` (text representation), ``binary`` or ``binary2``. By default, use the format that was specified in each `Table` object as it was created or read in. See :ref:`votable-serialization`. 
""" if tabledata_format is not None: if tabledata_format.lower() not in ( 'tabledata', 'binary', 'binary2'): raise ValueError("Unknown format type '{0}'".format(format)) kwargs = { 'version': self.version, 'version_1_1_or_later': util.version_compare(self.version, '1.1') >= 0, 'version_1_2_or_later': util.version_compare(self.version, '1.2') >= 0, 'version_1_3_or_later': util.version_compare(self.version, '1.3') >= 0, 'tabledata_format': tabledata_format, '_debug_python_based_parser': _debug_python_based_parser, '_group_number': 1} with util.convert_to_writable_filelike( fd, compressed=compressed) as fd: w = XMLWriter(fd) version = self.version if _astropy_version is None: lib_version = astropy_version else: lib_version = _astropy_version xml_header = """ <?xml version="1.0" encoding="utf-8"?> <!-- Produced with astropy.io.votable version {lib_version} http://www.astropy.org/ -->\n""" w.write(xml_header.lstrip().format(**locals())) with w.tag('VOTABLE', {'version': version, 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance", 'xsi:noNamespaceSchemaLocation': "http://www.ivoa.net/xml/VOTable/v{}".format(version), 'xmlns': "http://www.ivoa.net/xml/VOTable/v{}".format(version)}): if self.description is not None: w.element("DESCRIPTION", self.description, wrap=True) element_sets = [self.coordinate_systems, self.params, self.infos, self.resources] if kwargs['version_1_2_or_later']: element_sets[0] = self.groups for element_set in element_sets: for element in element_set: element.to_xml(w, **kwargs) def iter_tables(self): """ Iterates over all tables in the VOTable file in a "flat" way, ignoring the nesting of resources etc. """ for resource in self.resources: for table in resource.iter_tables(): yield table def get_first_table(self): """ Often, you know there is only one table in the file, and that's all you need. This method returns that first table. """ for table in self.iter_tables(): if not table.is_empty(): return table raise IndexError("No table found in VOTABLE file.") get_table_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_tables', 'TABLE', """ Looks up a TABLE_ element by the given ID. Used by the table "ref" attribute. """) get_tables_by_utype = _lookup_by_attr_factory( 'utype', False, 'iter_tables', 'TABLE', """ Looks up a TABLE_ element by the given utype, and returns an iterator emitting all matches. """) def get_table_by_index(self, idx): """ Get a table by its ordinal position in the file. """ for i, table in enumerate(self.iter_tables()): if i == idx: return table raise IndexError( "No table at index {:d} found in VOTABLE file.".format(idx)) def iter_fields_and_params(self): """ Recursively iterate over all FIELD_ and PARAM_ elements in the VOTABLE_ file. """ for resource in self.resources: for field in resource.iter_fields_and_params(): yield field get_field_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_fields_and_params', 'FIELD', """ Looks up a FIELD_ element by the given ID_. Used by the field's "ref" attribute. """) get_fields_by_utype = _lookup_by_attr_factory( 'utype', False, 'iter_fields_and_params', 'FIELD', """ Looks up a FIELD_ element by the given utype and returns an iterator emitting all matches. """) get_field_by_id_or_name = _lookup_by_id_or_name_factory( 'iter_fields_and_params', 'FIELD', """ Looks up a FIELD_ element by the given ID_ or name. """) def iter_values(self): """ Recursively iterate over all VALUES_ elements in the VOTABLE_ file. 
""" for field in self.iter_fields_and_params(): yield field.values get_values_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_values', 'VALUES', """ Looks up a VALUES_ element by the given ID. Used by the values "ref" attribute. """) def iter_groups(self): """ Recursively iterate over all GROUP_ elements in the VOTABLE_ file. """ for table in self.iter_tables(): for group in table.iter_groups(): yield group get_group_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_groups', 'GROUP', """ Looks up a GROUP_ element by the given ID. Used by the group's "ref" attribute """) get_groups_by_utype = _lookup_by_attr_factory( 'utype', False, 'iter_groups', 'GROUP', """ Looks up a GROUP_ element by the given utype and returns an iterator emitting all matches. """) def iter_coosys(self): """ Recursively iterate over all COOSYS_ elements in the VOTABLE_ file. """ for coosys in self.coordinate_systems: yield coosys for resource in self.resources: for coosys in resource.iter_coosys(): yield coosys get_coosys_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_coosys', 'COOSYS', """Looks up a COOSYS_ element by the given ID.""") def iter_info(self): """ Recursively iterate over all INFO_ elements in the VOTABLE_ file. """ for info in self.infos: yield info for resource in self.resources: for info in resource.iter_info(): yield info get_info_by_id = _lookup_by_attr_factory( 'ID', True, 'iter_info', 'INFO', """Looks up a INFO element by the given ID.""") def set_all_tables_format(self, format): """ Set the output storage format of all tables in the file. """ for table in self.iter_tables(): table.format = format @classmethod def from_table(cls, table, table_id=None): """ Create a `VOTableFile` instance from a given `astropy.table.Table` instance. Parameters ---------- table_id : str, optional Set the given ID attribute on the returned Table instance. """ votable_file = cls() resource = Resource() votable = Table.from_table(votable_file, table) if table_id is not None: votable.ID = table_id resource.tables.append(votable) votable_file.resources.append(resource) return votable_file
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This file contains a contains the high-level functions to read a VOTable file. """ # STDLIB import io import os import sys import textwrap import warnings # LOCAL from . import exceptions from . import tree from ...utils.xml import iterparser from ...utils import data __all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate', 'reset_vo_warnings'] def parse(source, columns=None, invalid='exception', pedantic=None, chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None, table_id=None, filename=None, unit_format=None, datatype_mapping=None, _debug_python_based_parser=False): """ Parses a VOTABLE_ xml file (or file-like object), and returns a `~astropy.io.votable.tree.VOTableFile` object. Parameters ---------- source : str or readable file-like object Path or file object containing a VOTABLE_ xml file. columns : sequence of str, optional List of field names to include in the output. The default is to include all fields. invalid : str, optional One of the following values: - 'exception': throw an exception when an invalid value is encountered (default) - 'mask': mask out invalid values pedantic : bool, optional When `True`, raise an error when the file violates the spec, otherwise issue a warning. Warnings may be controlled using the standard Python mechanisms. See the `warnings` module in the Python standard library for more information. When not provided, uses the configuration setting ``astropy.io.votable.pedantic``, which defaults to False. chunk_size : int, optional The number of rows to read before converting to an array. Higher numbers are likely to be faster, but will consume more memory. table_number : int, optional The number of table in the file to read in. If `None`, all tables will be read. If a number, 0 refers to the first table in the file, and only that numbered table will be parsed and read in. Should not be used with ``table_id``. table_id : str, optional The ID of the table in the file to read in. Should not be used with ``table_number``. filename : str, optional A filename, URL or other identifier to use in error messages. If *filename* is None and *source* is a string (i.e. a path), then *source* will be used as a filename for error messages. Therefore, *filename* is only required when source is a file-like object. unit_format : str, astropy.units.format.Base instance or None, optional The unit format to use when parsing unit attributes. If a string, must be the name of a unit formatter. The built-in formats include ``generic``, ``fits``, ``cds``, and ``vounit``. A custom formatter may be provided by passing a `~astropy.units.UnitBase` instance. If `None` (default), the unit format to use will be the one specified by the VOTable specification (which is ``cds`` up to version 1.2 of VOTable, and (probably) ``vounit`` in future versions of the spec). datatype_mapping : dict of str to str, optional A mapping of datatype names to valid VOTable datatype names. For example, if the file being read contains the datatype "unsignedInt" (an invalid datatype in VOTable), include the mapping ``{"unsignedInt": "long"}``. Returns ------- votable : `~astropy.io.votable.tree.VOTableFile` object See also -------- astropy.io.votable.exceptions : The exceptions this function may raise. """ from . 
import conf invalid = invalid.lower() if invalid not in ('exception', 'mask'): raise ValueError("accepted values of ``invalid`` are: " "``'exception'`` or ``'mask'``.") if pedantic is None: pedantic = conf.pedantic if datatype_mapping is None: datatype_mapping = {} config = { 'columns': columns, 'invalid': invalid, 'pedantic': pedantic, 'chunk_size': chunk_size, 'table_number': table_number, 'filename': filename, 'unit_format': unit_format, 'datatype_mapping': datatype_mapping } if filename is None and isinstance(source, str): config['filename'] = source with iterparser.get_xml_iterator( source, _debug_python_based_parser=_debug_python_based_parser) as iterator: return tree.VOTableFile( config=config, pos=(1, 1)).parse(iterator, config) def parse_single_table(source, **kwargs): """ Parses a VOTABLE_ xml file (or file-like object), reading and returning only the first `~astropy.io.votable.tree.Table` instance. See `parse` for a description of the keyword arguments. Returns ------- votable : `~astropy.io.votable.tree.Table` object """ if kwargs.get('table_number') is None: kwargs['table_number'] = 0 votable = parse(source, **kwargs) return votable.get_first_table() def writeto(table, file, tabledata_format=None): """ Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file. Parameters ---------- table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance. file : str or writable file-like object Path or file object to write to tabledata_format : str, optional Override the format of the table(s) data to write. Must be one of ``tabledata`` (text representation), ``binary`` or ``binary2``. By default, use the format that was specified in each ``table`` object as it was created or read in. See :ref:`votable-serialization`. """ from ...table import Table if isinstance(table, Table): table = tree.VOTableFile.from_table(table) elif not isinstance(table, tree.VOTableFile): raise TypeError( "first argument must be astropy.io.vo.VOTableFile or " "astropy.table.Table instance") table.to_xml(file, tabledata_format=tabledata_format, _debug_python_based_parser=True) def validate(source, output=None, xmllint=False, filename=None): """ Prints a validation report for the given file. Parameters ---------- source : str or readable file-like object Path to a VOTABLE_ xml file or pathlib.path object having Path to a VOTABLE_ xml file. output : writable file-like object, optional Where to output the report. Defaults to ``sys.stdout``. If `None`, the output will be returned as a string. xmllint : bool, optional When `True`, also send the file to ``xmllint`` for schema and DTD validation. Requires that ``xmllint`` is installed. The default is `False`. ``source`` must be a file on the local filesystem in order for ``xmllint`` to work. filename : str, optional A filename to use in the error messages. If not provided, one will be automatically determined from ``source``. Returns ------- is_valid : bool or str Returns `True` if no warnings were found. If ``output`` is `None`, the return value will be a string. 
""" from ...utils.console import print_code_line, color_print if output is None: output = sys.stdout return_as_str = False if output is None: output = io.StringIO() lines = [] votable = None reset_vo_warnings() with data.get_readable_fileobj(source, encoding='binary') as fd: content = fd.read() content_buffer = io.BytesIO(content) content_buffer.seek(0) if filename is None: if isinstance(source, str): filename = source elif hasattr(source, 'name'): filename = source.name elif hasattr(source, 'url'): filename = source.url else: filename = "<unknown>" with warnings.catch_warnings(record=True) as warning_lines: warnings.resetwarnings() warnings.simplefilter("always", exceptions.VOWarning, append=True) try: votable = parse(content_buffer, pedantic=False, filename=filename) except ValueError as e: lines.append(str(e)) lines = [str(x.message) for x in warning_lines if issubclass(x.category, exceptions.VOWarning)] + lines content_buffer.seek(0) output.write("Validation report for {0}\n\n".format(filename)) if len(lines): xml_lines = iterparser.xml_readlines(content_buffer) for warning in lines: w = exceptions.parse_vowarning(warning) if not w['is_something']: output.write(w['message']) output.write('\n\n') else: line = xml_lines[w['nline'] - 1] warning = w['warning'] if w['is_warning']: color = 'yellow' else: color = 'red' color_print( '{0:d}: '.format(w['nline']), '', warning or 'EXC', color, ': ', '', textwrap.fill( w['message'], initial_indent=' ', subsequent_indent=' ').lstrip(), file=output) print_code_line(line, w['nchar'], file=output) output.write('\n') else: output.write('astropy.io.votable found no violations.\n\n') success = 0 if xmllint and os.path.exists(filename): from ...utils.xml import validate if votable is None: version = "1.1" else: version = votable.version success, stdout, stderr = validate.validate_schema( filename, version) if success != 0: output.write( 'xmllint schema violations:\n\n') output.write(stderr) else: output.write('xmllint passed\n') if return_as_str: return output.getvalue() return len(lines) == 0 and success == 0 def from_table(table, table_id=None): """ Given an `~astropy.table.Table` object, return a `~astropy.io.votable.tree.VOTableFile` file structure containing just that single table. Parameters ---------- table : `~astropy.table.Table` instance table_id : str, optional If not `None`, set the given id on the returned `~astropy.io.votable.tree.Table` instance. Returns ------- votable : `~astropy.io.votable.tree.VOTableFile` instance """ return tree.VOTableFile.from_table(table, table_id=table_id) def is_votable(source): """ Reads the header of a file to determine if it is a VOTable file. Parameters ---------- source : str or readable file-like object Path or file object containing a VOTABLE_ xml file. Returns ------- is_votable : bool Returns `True` if the given file is a VOTable file. """ try: with iterparser.get_xml_iterator(source) as iterator: for start, tag, data, pos in iterator: if tag != 'xml': return False break for start, tag, data, pos in iterator: if tag != 'VOTABLE': return False break return True except ValueError: return False def reset_vo_warnings(): """ Resets all of the vo warning state so that warnings that have already been emitted will be emitted again. This is used, for example, by `validate` which must emit all warnings each time it is called. """ from . 
import converters, xmlutil # -----------------------------------------------------------# # This is a special variable used by the Python warnings # # infrastructure to keep track of warnings that have # # already been seen. Since we want to get every single # # warning out of this, we have to delete all of them first. # # -----------------------------------------------------------# for module in (converters, exceptions, tree, xmlutil): if hasattr(module, '__warningregistry__'): del module.__warningregistry__
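# A minimal usage sketch of the functions defined above: parse a file, take
# the first table as an `astropy.table.Table`, and write a copy back out.
# The file names are arbitrary placeholders.
def _example_round_trip(path="example.xml"):
    votable = parse(path, pedantic=False)         # full VOTableFile tree
    table = parse_single_table(path).to_table()   # first TABLE as astropy Table
    writeto(votable, "copy.xml", tabledata_format="binary")
    return table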
ea272a94ac9381acaf74d7262d7e8d2eedb17f100eae6c7c5c48707603514ec9
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package reads and writes data formats used by the Virtual
Observatory (VO) initiative, particularly the VOTable XML format.
"""

from .table import (
    parse, parse_single_table, validate, from_table, is_votable, writeto)
from .exceptions import (
    VOWarning, VOTableChangeWarning, VOTableSpecWarning,
    UnimplementedWarning, IOWarning, VOTableSpecError)
from ... import config as _config

__all__ = [
    'Conf', 'conf', 'parse', 'parse_single_table', 'validate',
    'from_table', 'is_votable', 'writeto', 'VOWarning',
    'VOTableChangeWarning', 'VOTableSpecWarning', 'UnimplementedWarning',
    'IOWarning', 'VOTableSpecError']


class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.io.votable`.
    """

    pedantic = _config.ConfigItem(
        False,
        'When True, treat fixable violations of the VOTable spec as exceptions.',
        aliases=['astropy.io.votable.table.pedantic'])


conf = Conf()
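# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): it
# shows the configuration item defined above being read and temporarily
# overridden with the standard ConfigNamespace.set_temp() context manager.
if __name__ == '__main__':
    from astropy.io.votable import conf

    print(conf.pedantic)              # the configured default

    # Temporarily enable pedantic parsing for a block of code.
    with conf.set_temp('pedantic', True):
        print(conf.pedantic)          # True inside the block
    print(conf.pedantic)              # restored afterwards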
aa7c71db116f415401efb81ceb95eb0b2186c674b8bb2f213eaf99a5f4a6b249
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ .. _warnings: Warnings -------- .. note:: Most of the following warnings indicate violations of the VOTable specification. They should be reported to the authors of the tools that produced the VOTable file. To control the warnings emitted, use the standard Python :mod:`warnings` module. Most of these are of the type `VOTableSpecWarning`. {warnings} .. _exceptions: Exceptions ---------- .. note:: This is a list of many of the fatal exceptions emitted by vo.table when the file does not conform to spec. Other exceptions may be raised due to unforeseen cases or bugs in vo.table itself. {exceptions} """ # STDLIB import io import re from textwrap import dedent from warnings import warn from ...utils.exceptions import AstropyWarning __all__ = [ 'warn_or_raise', 'vo_raise', 'vo_reraise', 'vo_warn', 'warn_unknown_attrs', 'parse_vowarning', 'VOWarning', 'VOTableChangeWarning', 'VOTableSpecWarning', 'UnimplementedWarning', 'IOWarning', 'VOTableSpecError'] MAX_WARNINGS = 10 def _format_message(message, name, config=None, pos=None): if config is None: config = {} if pos is None: pos = ('?', '?') filename = config.get('filename', '?') return '{}:{}:{}: {}: {}'.format(filename, pos[0], pos[1], name, message) def _suppressed_warning(warning, config, stacklevel=2): warning_class = type(warning) config.setdefault('_warning_counts', dict()).setdefault(warning_class, 0) config['_warning_counts'][warning_class] += 1 message_count = config['_warning_counts'][warning_class] if message_count <= MAX_WARNINGS: if message_count == MAX_WARNINGS: warning.formatted_message += \ ' (suppressing further warnings of this type...)' warn(warning, stacklevel=stacklevel+1) def warn_or_raise(warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1): """ Warn or raise an exception, depending on the pedantic setting. """ if config is None: config = {} if config.get('pedantic'): if exception_class is None: exception_class = warning_class vo_raise(exception_class, args, config, pos) else: vo_warn(warning_class, args, config, pos, stacklevel=stacklevel+1) def vo_raise(exception_class, args=(), config=None, pos=None): """ Raise an exception, with proper position information if available. """ if config is None: config = {} raise exception_class(args, config, pos) def vo_reraise(exc, config=None, pos=None, additional=''): """ Raise an exception, with proper position information if available. Restores the original traceback of the exception, and should only be called within an "except:" block of code. """ if config is None: config = {} message = _format_message(str(exc), exc.__class__.__name__, config, pos) if message.split()[0] == str(exc).split()[0]: message = str(exc) if len(additional): message += ' ' + additional exc.args = (message,) raise exc def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1): """ Warn, with proper position information if available. """ if config is None: config = {} warning = warning_class(args, config, pos) _suppressed_warning(warning, config, stacklevel=stacklevel+1) def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1): for attr in attrs: if attr not in good_attr: vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel+1) _warning_pat = re.compile( (r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): " + r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$")) def parse_vowarning(line): """ Parses the vo warning string back into its parts. 
""" result = {} match = _warning_pat.search(line) if match: result['warning'] = warning = match.group('warning') if warning is not None: result['is_warning'] = (warning[0].upper() == 'W') result['is_exception'] = not result['is_warning'] result['number'] = int(match.group('warning')[1:]) result['doc_url'] = "io/votable/api_exceptions.html#{0}".format( warning.lower()) else: result['is_warning'] = False result['is_exception'] = False result['is_other'] = True result['number'] = None result['doc_url'] = None try: result['nline'] = int(match.group('nline')) except ValueError: result['nline'] = 0 try: result['nchar'] = int(match.group('nchar')) except ValueError: result['nchar'] = 0 result['message'] = match.group('rest') result['is_something'] = True else: result['warning'] = None result['is_warning'] = False result['is_exception'] = False result['is_other'] = False result['is_something'] = False if not isinstance(line, str): line = line.decode('utf-8') result['message'] = line return result class VOWarning(AstropyWarning): """ The base class of all VO warnings and exceptions. Handles the formatting of the message with a warning or exception code, filename, line and column number. """ default_args = () message_template = '' def __init__(self, args, config=None, pos=None): if config is None: config = {} if not isinstance(args, tuple): args = (args, ) msg = self.message_template.format(*args) self.formatted_message = _format_message( msg, self.__class__.__name__, config, pos) Warning.__init__(self, self.formatted_message) def __str__(self): return self.formatted_message @classmethod def get_short_name(cls): if len(cls.default_args): return cls.message_template.format(*cls.default_args) return cls.message_template class VOTableChangeWarning(VOWarning, SyntaxWarning): """ A change has been made to the input XML file. """ class VOTableSpecWarning(VOWarning, SyntaxWarning): """ The input XML file violates the spec, but there is an obvious workaround. """ class UnimplementedWarning(VOWarning, SyntaxWarning): """ A feature of the VOTABLE_ spec is not implemented. """ class IOWarning(VOWarning, RuntimeWarning): """ A network or IO error occurred, but was recovered using the cache. """ class VOTableSpecError(VOWarning, ValueError): """ The input XML file violates the spec and there is no good workaround. """ class W01(VOTableSpecWarning): """ The VOTable spec states: If a cell contains an array or complex number, it should be encoded as multiple numbers separated by whitespace. Many VOTable files in the wild use commas as a separator instead, and ``vo.table`` supports this convention when not in :ref:`pedantic-mode`. ``vo.table`` always outputs files using only spaces, regardless of how they were input. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__ """ message_template = "Array uses commas rather than whitespace" class W02(VOTableSpecWarning): r""" XML ids must match the following regular expression:: ^[A-Za-z_][A-Za-z0-9_\.\-]*$ The VOTable 1.1 says the following: According to the XML standard, the attribute ``ID`` is a string beginning with a letter or underscore (``_``), followed by a sequence of letters, digits, or any of the punctuation characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or ``:`` (colon). However, this is in conflict with the XML standard, which says colons may not be used. 
VOTable 1.1's own schema does not allow a colon here. Therefore, ``vo.table`` disallows the colon. VOTable 1.2 corrects this error in the specification. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `XML Names <http://www.w3.org/TR/REC-xml/#NT-Name>`__ """ message_template = "{} attribute '{}' is invalid. Must be a standard XML id" default_args = ('x', 'y') class W03(VOTableChangeWarning): """ The VOTable 1.1 spec says the following about ``name`` vs. ``ID`` on ``FIELD`` and ``VALUE`` elements: ``ID`` and ``name`` attributes have a different role in VOTable: the ``ID`` is meant as a *unique identifier* of an element seen as a VOTable component, while the ``name`` is meant for presentation purposes, and need not to be unique throughout the VOTable document. The ``ID`` attribute is therefore required in the elements which have to be referenced, but in principle any element may have an ``ID`` attribute. ... In summary, the ``ID`` is different from the ``name`` attribute in that (a) the ``ID`` attribute is made from a restricted character set, and must be unique throughout a VOTable document whereas names are standard XML attributes and need not be unique; and (b) there should be support in the parsing software to look up references and extract the relevant element with matching ``ID``. It is further recommended in the VOTable 1.2 spec: While the ``ID`` attribute has to be unique in a VOTable document, the ``name`` attribute need not. It is however recommended, as a good practice, to assign unique names within a ``TABLE`` element. This recommendation means that, between a ``TABLE`` and its corresponding closing ``TABLE`` tag, ``name`` attributes of ``FIELD``, ``PARAM`` and optional ``GROUP`` elements should be all different. Since ``vo.table`` requires a unique identifier for each of its columns, ``ID`` is used for the column name when present. However, when ``ID`` is not present, (since it is not required by the specification) ``name`` is used instead. However, ``name`` must be cleansed by replacing invalid characters (such as whitespace) with underscores. .. note:: This warning does not indicate that the input file is invalid with respect to the VOTable specification, only that the column names in the record array may not match exactly the ``name`` attributes specified in the file. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__ """ message_template = "Implicitly generating an ID from a name '{}' -> '{}'" default_args = ('x', 'y') class W04(VOTableSpecWarning): """ The ``content-type`` attribute must use MIME content-type syntax as defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__. The current check for validity is somewhat over-permissive. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__ """ message_template = "content-type '{}' must be a valid MIME content type" default_args = ('x',) class W05(VOTableSpecWarning): """ The attribute must be a valid URI as defined in `RFC 2396 <http://www.ietf.org/rfc/rfc2396.txt>`_. 
""" message_template = "'{}' is not a valid URI" default_args = ('x',) class W06(VOTableSpecWarning): """ This warning is emitted when a ``ucd`` attribute does not match the syntax of a `unified content descriptor <http://vizier.u-strasbg.fr/doc/UCD.htx>`__. If the VOTable version is 1.2 or later, the UCD will also be checked to ensure it conforms to the controlled vocabulary defined by UCD1+. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__ """ message_template = "Invalid UCD '{}': {}" default_args = ('x', 'explanation') class W07(VOTableSpecWarning): """ As astro year field is a Besselian or Julian year matching the regular expression:: ^[JB]?[0-9]+([.][0-9]*)?$ Defined in this XML Schema snippet:: <xs:simpleType name="astroYear"> <xs:restriction base="xs:token"> <xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/> </xs:restriction> </xs:simpleType> """ message_template = "Invalid astroYear in {}: '{}'" default_args = ('x', 'y') class W08(VOTableSpecWarning): """ To avoid local-dependent number parsing differences, ``vo.table`` may require a string or unicode string where a numeric type may make more sense. """ message_template = "'{}' must be a str or bytes object" default_args = ('x',) class W09(VOTableSpecWarning): """ The VOTable specification uses the attribute name ``ID`` (with uppercase letters) to specify unique identifiers. Some VOTable-producing tools use the more standard lowercase ``id`` instead. ``vo.table`` accepts ``id`` and emits this warning when not in ``pedantic`` mode. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__ """ message_template = "ID attribute not capitalized" class W10(VOTableSpecWarning): """ The parser has encountered an element that does not exist in the specification, or appears in an invalid context. Check the file against the VOTable schema (with a tool such as `xmllint <http://xmlsoft.org/xmllint.html>`__. If the file validates against the schema, and you still receive this warning, this may indicate a bug in ``vo.table``. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__ """ message_template = "Unknown tag '{}'. Ignoring" default_args = ('x',) class W11(VOTableSpecWarning): """ Earlier versions of the VOTable specification used a ``gref`` attribute on the ``LINK`` element to specify a `GLU reference <http://aladin.u-strasbg.fr/glu/>`__. New files should specify a ``glu:`` protocol using the ``href`` attribute. Since ``vo.table`` does not currently support GLU references, it likewise does not automatically convert the ``gref`` attribute to the new form. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__ """ message_template = "The gref attribute on LINK is deprecated in VOTable 1.1" class W12(VOTableChangeWarning): """ In order to name the columns of the Numpy record array, each ``FIELD`` element must have either an ``ID`` or ``name`` attribute to derive a name from. Strictly speaking, according to the VOTable schema, the ``name`` attribute is required. 
However, if ``name`` is not present by ``ID`` is, and *pedantic mode* is off, ``vo.table`` will continue without a ``name`` defined. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__ """ message_template = ( "'{}' element must have at least one of 'ID' or 'name' attributes") default_args = ('x',) class W13(VOTableSpecWarning): """ Some VOTable files in the wild use non-standard datatype names. These are mapped to standard ones using the following mapping:: string -> char unicodeString -> unicodeChar int16 -> short int32 -> int int64 -> long float32 -> float float64 -> double unsignedInt -> long unsignedShort -> int To add more datatype mappings during parsing, use the ``datatype_mapping`` keyword to `astropy.io.votable.parse`. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "'{}' is not a valid VOTable datatype, should be '{}'" default_args = ('x', 'y') # W14: Deprecated class W15(VOTableSpecWarning): """ The ``name`` attribute is required on every ``FIELD`` element. However, many VOTable files in the wild omit it and provide only an ``ID`` instead. In this case, when *pedantic mode* is off, ``vo.table`` will copy the ``name`` attribute to a new ``ID`` attribute. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__ """ message_template = "{} element missing required 'name' attribute" default_args = ('x',) # W16: Deprecated class W17(VOTableSpecWarning): """ A ``DESCRIPTION`` element can only appear once within its parent element. According to the schema, it may only occur once (`1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__) However, it is a `proposed extension <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__ to VOTable 1.2. """ message_template = "{} element contains more than one DESCRIPTION element" default_args = ('x',) class W18(VOTableSpecWarning): """ The number of rows explicitly specified in the ``nrows`` attribute does not match the actual number of rows (``TR`` elements) present in the ``TABLE``. This may indicate truncation of the file, or an internal error in the tool that produced it. If *pedantic mode* is off, parsing will proceed, with the loss of some performance. **References:** `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__ """ message_template = 'TABLE specified nrows={}, but table contains {} rows' default_args = ('x', 'y') class W19(VOTableSpecWarning): """ The column fields as defined using ``FIELD`` elements do not match those in the headers of the embedded FITS file. If *pedantic mode* is off, the embedded FITS file will take precedence. 
""" message_template = ( 'The fields defined in the VOTable do not match those in the ' + 'embedded FITS file') class W20(VOTableSpecWarning): """ If no version number is explicitly given in the VOTable file, the parser assumes it is written to the VOTable 1.1 specification. """ message_template = 'No version number specified in file. Assuming {}' default_args = ('1.1',) class W21(UnimplementedWarning): """ Unknown issues may arise using ``vo.table`` with VOTable files from a version other than 1.1, 1.2 or 1.3. """ message_template = ( 'vo.table is designed for VOTable version 1.1, 1.2 and 1.3, but ' + 'this file is {}') default_args = ('x',) class W22(VOTableSpecWarning): """ Version 1.0 of the VOTable specification used the ``DEFINITIONS`` element to define coordinate systems. Version 1.1 now uses ``COOSYS`` elements throughout the document. **References:** `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__ """ message_template = 'The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring' class W23(IOWarning): """ Raised when the VO service database can not be updated (possibly due to a network outage). This is only a warning, since an older and possible out-of-date VO service database was available locally. """ message_template = "Unable to update service information for '{}'" default_args = ('x',) class W24(VOWarning, FutureWarning): """ The VO catalog database retrieved from the www is designed for a newer version of vo.table. This may cause problems or limited features performing service queries. Consider upgrading vo.table to the latest version. """ message_template = "The VO catalog database is for a later version of vo.table" class W25(IOWarning): """ A VO service query failed due to a network error or malformed arguments. Another alternative service may be attempted. If all services fail, an exception will be raised. """ message_template = "'{}' failed with: {}" default_args = ('service', '...') class W26(VOTableSpecWarning): """ The given element was not supported inside of the given element until the specified VOTable version, however the version declared in the file is for an earlier version. These attributes may not be written out to the file. """ message_template = "'{}' inside '{}' added in VOTable {}" default_args = ('child', 'parent', 'X.X') class W27(VOTableSpecWarning): """ The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in favor of a reference to the Space-Time Coordinate (STC) data model (see `utype <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__ and the IVOA note `referencing STC in VOTable <http://ivoa.net/Documents/latest/VOTableSTC.html>`__. """ message_template = "COOSYS deprecated in VOTable 1.2" class W28(VOTableSpecWarning): """ The given attribute was not supported on the given element until the specified VOTable version, however the version declared in the file is for an earlier version. These attributes may not be written out to the file. """ message_template = "'{}' on '{}' added in VOTable {}" default_args = ('attribute', 'element', 'X.X') class W29(VOTableSpecWarning): """ Some VOTable files specify their version number in the form "v1.0", when the only supported forms in the spec are "1.0". 
**References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__ """ message_template = "Version specified in non-standard form '{}'" default_args = ('v1.0',) class W30(VOTableSpecWarning): """ Some VOTable files write missing floating-point values in non-standard ways, such as "null" and "-". In non-pedantic mode, any non-standard floating-point literals are treated as missing values. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "Invalid literal for float '{}'. Treating as empty." default_args = ('x',) class W31(VOTableSpecWarning): """ Since NaN's can not be represented in integer fields directly, a null value must be specified in the FIELD descriptor to support reading NaN's from the tabledata. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "NaN given in an integral field without a specified null value" class W32(VOTableSpecWarning): """ Each field in a table must have a unique ID. If two or more fields have the same ID, some will be renamed to ensure that all IDs are unique. From the VOTable 1.2 spec: The ``ID`` and ``ref`` attributes are defined as XML types ``ID`` and ``IDREF`` respectively. This means that the contents of ``ID`` is an identifier which must be unique throughout a VOTable document, and that the contents of the ``ref`` attribute represents a reference to an identifier which must exist in the VOTable document. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__ """ message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness" default_args = ('x', 'x_2') class W33(VOTableChangeWarning): """ Each field in a table must have a unique name. If two or more fields have the same name, some will be renamed to ensure that all names are unique. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__ """ message_template = "Column name '{}' renamed to '{}' to ensure uniqueness" default_args = ('x', 'x_2') class W34(VOTableSpecWarning): """ The attribute requires the value to be a valid XML token, as defined by `XML 1.0 <http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__. """ message_template = "'{}' is an invalid token for attribute '{}'" default_args = ('x', 'y') class W35(VOTableSpecWarning): """ The ``name`` and ``value`` attributes are required on all ``INFO`` elements. **References:** `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__ """ message_template = "'{}' attribute required for INFO elements" default_args = ('x',) class W36(VOTableSpecWarning): """ If the field specifies a ``null`` value, that value must conform to the given ``datatype``. 
**References:** `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__ """ message_template = "null value '{}' does not match field datatype, setting to 0" default_args = ('x',) class W37(UnimplementedWarning): """ The 3 datatypes defined in the VOTable specification and supported by vo.table are ``TABLEDATA``, ``BINARY`` and ``FITS``. **References:** `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__ """ message_template = "Unsupported data format '{}'" default_args = ('x',) class W38(VOTableSpecWarning): """ The only encoding for local binary data supported by the VOTable specification is base64. """ message_template = "Inline binary data must be base64 encoded, got '{}'" default_args = ('x',) class W39(VOTableSpecWarning): """ Bit values do not support masking. This warning is raised upon setting masked data in a bit column. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "Bit values can not be masked" class W40(VOTableSpecWarning): """ This is a terrible hack to support Simple Image Access Protocol results from `archive.noao.edu <http://archive.noao.edu>`__. It creates a field for the coordinate projection type of type "double", which actually contains character data. We have to hack the field to store character data, or we can't read it in. A warning will be raised when this happens. """ message_template = "'cprojection' datatype repaired" class W41(VOTableSpecWarning): """ An XML namespace was specified on the ``VOTABLE`` element, but the namespace does not match what is expected for a ``VOTABLE`` file. The ``VOTABLE`` namespace is:: http://www.ivoa.net/xml/VOTable/vX.X where "X.X" is the version number. Some files in the wild set the namespace to the location of the VOTable schema, which is not correct and will not pass some validating parsers. """ message_template = ( "An XML namespace is specified, but is incorrect. Expected " + "'{}', got '{}'") default_args = ('x', 'y') class W42(VOTableSpecWarning): """ The root element should specify a namespace. The ``VOTABLE`` namespace is:: http://www.ivoa.net/xml/VOTable/vX.X where "X.X" is the version number. """ message_template = "No XML namespace specified" class W43(VOTableSpecWarning): """ Referenced elements should be defined before referees. From the VOTable 1.2 spec: In VOTable1.2, it is further recommended to place the ID attribute prior to referencing it whenever possible. """ message_template = "{} ref='{}' which has not already been defined" default_args = ('element', 'x',) class W44(VOTableSpecWarning): """ ``VALUES`` elements that reference another element should not have their own content. From the VOTable 1.2 spec: The ``ref`` attribute of a ``VALUES`` element can be used to avoid a repetition of the domain definition, by referring to a previously defined ``VALUES`` element having the referenced ``ID`` attribute. When specified, the ``ref`` attribute defines completely the domain without any other element or attribute, as e.g. 
``<VALUES ref="RAdomain"/>`` """ message_template = "VALUES element with ref attribute has content ('{}')" default_args = ('element',) class W45(VOWarning, ValueError): """ The ``content-role`` attribute on the ``LINK`` element must be one of the following:: query, hints, doc, location And in VOTable 1.3, additionally:: type **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__ `1.3 <http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__ """ message_template = "content-role attribute '{}' invalid" default_args = ('x',) class W46(VOTableSpecWarning): """ The given char or unicode string is too long for the specified field length. """ message_template = "{} value is too long for specified length of {}" default_args = ('char or unicode', 'x') class W47(VOTableSpecWarning): """ If no arraysize is specified on a char field, the default of '1' is implied, but this is rarely what is intended. """ message_template = "Missing arraysize indicates length 1" class W48(VOTableSpecWarning): """ The attribute is not defined in the specification. """ message_template = "Unknown attribute '{}' on {}" default_args = ('attribute', 'element') class W49(VOTableSpecWarning): """ Prior to VOTable 1.3, the empty cell was illegal for integer fields. If a \"null\" value was specified for the cell, it will be used for the value, otherwise, 0 will be used. """ message_template = "Empty cell illegal for integer fields." class W50(VOTableSpecWarning): """ Invalid unit string as defined in the `Standards for Astronomical Catalogues, Version 2.0 <http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_. Consider passing an explicit ``unit_format`` parameter if the units in this file conform to another specification. """ message_template = "Invalid unit string '{}'" default_args = ('x',) class W51(VOTableSpecWarning): """ The integer value is out of range for the size of the field. """ message_template = "Value '{}' is out of range for a {} integer field" default_args = ('x', 'n-bit') class W52(VOTableSpecWarning): """ The BINARY2 format was introduced in VOTable 1.3. It should not be present in files marked as an earlier version. """ message_template = ("The BINARY2 format was introduced in VOTable 1.3, but " "this file is declared as version '{}'") default_args = ('1.2',) class W53(VOTableSpecWarning): """ The VOTABLE element must contain at least one RESOURCE element. """ message_template = ("VOTABLE element must contain at least one RESOURCE element.") default_args = () class E01(VOWarning, ValueError): """ The size specifier for a ``char`` or ``unicode`` field must be only a number followed, optionally, by an asterisk. Multi-dimensional size specifiers are not supported for these datatypes. Strings, which are defined as a set of characters, can be represented in VOTable as a fixed- or variable-length array of characters:: <FIELD name="unboundedString" datatype="char" arraysize="*"/> A 1D array of strings can be represented as a 2D array of characters, but given the logic above, it is possible to define a variable-length array of fixed-length strings, but not a fixed-length array of variable-length strings. 
""" message_template = "Invalid size specifier '{}' for a {} field (in field '{}')" default_args = ('x', 'char/unicode', 'y') class E02(VOWarning, ValueError): """ The number of array elements in the data does not match that specified in the FIELD specifier. """ message_template = ( "Incorrect number of elements in array. " + "Expected multiple of {}, got {}") default_args = ('x', 'y') class E03(VOWarning, ValueError): """ Complex numbers should be two values separated by whitespace. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "'{}' does not parse as a complex number" default_args = ('x',) class E04(VOWarning, ValueError): """ A ``bit`` array should be a string of '0's and '1's. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "Invalid bit value '{}'" default_args = ('x',) class E05(VOWarning, ValueError): r""" A ``boolean`` value should be one of the following strings (case insensitive) in the ``TABLEDATA`` format:: 'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?' and in ``BINARY`` format:: 'T', 'F', '1', '0', '\0', ' ', '?' **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "Invalid boolean value '{}'" default_args = ('x',) class E06(VOWarning, ValueError): """ The supported datatypes are:: double, float, bit, boolean, unsignedByte, short, int, long, floatComplex, doubleComplex, char, unicodeChar The following non-standard aliases are also supported, but in these case :ref:`W13 <W13>` will be raised:: string -> char unicodeString -> unicodeChar int16 -> short int32 -> int int64 -> long float32 -> float float64 -> double unsignedInt -> long unsignedShort -> int To add more datatype mappings during parsing, use the ``datatype_mapping`` keyword to `astropy.io.votable.parse`. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__ """ message_template = "Unknown datatype '{}' on field '{}'" default_args = ('x', 'y') # E07: Deprecated class E08(VOWarning, ValueError): """ The ``type`` attribute on the ``VALUES`` element must be either ``legal`` or ``actual``. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__ """ message_template = "type must be 'legal' or 'actual', but is '{}'" default_args = ('x',) class E09(VOWarning, ValueError): """ The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a ``value`` attribute. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__ """ message_template = "'{}' must have a value attribute" default_args = ('x',) class E10(VOWarning, ValueError): """ From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have a ``datatype`` field. 
**References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__ """ message_template = "'datatype' attribute required on all '{}' elements" default_args = ('FIELD',) class E11(VOWarning, ValueError): """ The precision attribute is meant to express the number of significant digits, either as a number of decimal places (e.g. ``precision="F2"`` or equivalently ``precision="2"`` to express 2 significant figures after the decimal point), or as a number of significant figures (e.g. ``precision="E5"`` indicates a relative precision of 10-5). It is validated using the following regular expression:: [EF]?[1-9][0-9]* **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__ """ message_template = "precision '{}' is invalid" default_args = ('x',) class E12(VOWarning, ValueError): """ The width attribute is meant to indicate to the application the number of characters to be used for input or output of the quantity. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__ """ message_template = "width must be a positive integer, got '{}'" default_args = ('x',) class E13(VOWarning, ValueError): r""" From the VOTable 1.2 spec: A table cell can contain an array of a given primitive type, with a fixed or variable number of elements; the array may even be multidimensional. For instance, the position of a point in a 3D space can be defined by the following:: <FIELD ID="point_3D" datatype="double" arraysize="3"/> and each cell corresponding to that definition must contain exactly 3 numbers. An asterisk (\*) may be appended to indicate a variable number of elements in the array, as in:: <FIELD ID="values" datatype="int" arraysize="100*"/> where it is specified that each cell corresponding to that definition contains 0 to 100 integer numbers. The number may be omitted to specify an unbounded array (in practice up to =~2×10⁹ elements). A table cell can also contain a multidimensional array of a given primitive type. This is specified by a sequence of dimensions separated by the ``x`` character, with the first dimension changing fastest; as in the case of a simple array, the last dimension may be variable in length. As an example, the following definition declares a table cell which may contain a set of up to 10 images, each of 64×64 bytes:: <FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/> **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__ """ message_template = "Invalid arraysize attribute '{}'" default_args = ('x',) class E14(VOWarning, ValueError): """ All ``PARAM`` elements must have a ``value`` attribute. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__ """ message_template = "value attribute is required for all PARAM elements" class E15(VOWarning, ValueError): """ All ``COOSYS`` elements must have an ``ID`` attribute. 
Note that the VOTable 1.1 specification says this attribute is optional, but its corresponding schema indicates it is required. In VOTable 1.2, the ``COOSYS`` element is deprecated. """ message_template = "ID attribute is required for all COOSYS elements" class E16(VOTableSpecWarning): """ The ``system`` attribute on the ``COOSYS`` element must be one of the following:: 'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic', 'supergalactic', 'xy', 'barycentric', 'geo_app' **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__ """ message_template = "Invalid system attribute '{}'" default_args = ('x',) class E17(VOWarning, ValueError): """ ``extnum`` attribute must be a positive integer. **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__ """ message_template = "extnum must be a positive integer" class E18(VOWarning, ValueError): """ The ``type`` attribute of the ``RESOURCE`` element must be one of "results" or "meta". **References**: `1.1 <http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__, `1.2 <http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__ """ message_template = "type must be 'results' or 'meta', not '{}'" default_args = ('x',) class E19(VOWarning, ValueError): """ Raised either when the file doesn't appear to be XML, or the root element is not VOTABLE. """ message_template = "File does not appear to be a VOTABLE" class E20(VOTableSpecError): """ The table had only *x* fields defined, but the data itself has more columns than that. """ message_template = "Data has more columns than are defined in the header ({})" default_args = ('x',) class E21(VOWarning, ValueError): """ The table had *x* fields defined, but the data itself has only *y* columns. """ message_template = "Data has fewer columns ({}) than are defined in the header ({})" default_args = ('x', 'y') def _get_warning_and_exception_classes(prefix): classes = [] for key, val in globals().items(): if re.match(prefix + "[0-9]{2}", key): classes.append((key, val)) classes.sort() return classes def _build_doc_string(): def generate_set(prefix): classes = _get_warning_and_exception_classes(prefix) out = io.StringIO() for name, cls in classes: out.write(".. _{}:\n\n".format(name)) msg = "{}: {}".format(cls.__name__, cls.get_short_name()) if not isinstance(msg, str): msg = msg.decode('utf-8') out.write(msg) out.write('\n') out.write('~' * len(msg)) out.write('\n\n') doc = cls.__doc__ if not isinstance(doc, str): doc = doc.decode('utf-8') out.write(dedent(doc)) out.write('\n\n') return out.getvalue() warnings = generate_set('W') exceptions = generate_set('E') return {'warnings': warnings, 'exceptions': exceptions} if __doc__ is not None: __doc__ = __doc__.format(**_build_doc_string()) __all__.extend([x[0] for x in _get_warning_and_exception_classes('W')]) __all__.extend([x[0] for x in _get_warning_and_exception_classes('E')])
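# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): it
# shows the round trip between vo_warn(), which emits messages of the form
# "filename:line:column: CLASS: text", and parse_vowarning(), which recovers
# the parts again.  The config dict and position below are made up.
if __name__ == '__main__':
    import warnings as _warnings

    from astropy.io.votable.exceptions import vo_warn, parse_vowarning, W20

    demo_config = {'filename': 'example.xml'}
    demo_pos = (12, 34)

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter('always')
        vo_warn(W20, ('1.1',), demo_config, demo_pos)

    message = str(caught[0].message)
    print(message)
    # e.g. "example.xml:12:34: W20: No version number specified ..."

    parts = parse_vowarning(message)
    print(parts['warning'], parts['nline'], parts['nchar'])   # W20 12 34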
2e5f3605f11b602b4279d0a0ada6a48eff4833cc78112050a97cb6000dd94cd3
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This file contains routines to verify the correctness of UCD strings. """ # STDLIB import re # LOCAL from ...utils import data __all__ = ['parse_ucd', 'check_ucd'] class UCDWords: """ Manages a list of acceptable UCD words. Works by reading in a data file exactly as provided by IVOA. This file resides in data/ucd1p-words.txt. """ def __init__(self): self._primary = set() self._secondary = set() self._descriptions = {} self._capitalization = {} with data.get_pkg_data_fileobj( "data/ucd1p-words.txt", encoding='ascii') as fd: for line in fd.readlines(): type, name, descr = [ x.strip() for x in line.split('|')] name_lower = name.lower() if type in 'QPEV': self._primary.add(name_lower) if type in 'QSEV': self._secondary.add(name_lower) self._descriptions[name_lower] = descr self._capitalization[name_lower] = name def is_primary(self, name): """ Returns True if *name* is a valid primary name. """ return name.lower() in self._primary def is_secondary(self, name): """ Returns True if *name* is a valid secondary name. """ return name.lower() in self._secondary def get_description(self, name): """ Returns the official English description of the given UCD *name*. """ return self._descriptions[name.lower()] def normalize_capitalization(self, name): """ Returns the standard capitalization form of the given name. """ return self._capitalization[name.lower()] _ucd_singleton = None def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False): """ Parse the UCD into its component parts. Parameters ---------- ucd : str The UCD string check_controlled_vocabulary : bool, optional If `True`, then each word in the UCD will be verified against the UCD1+ controlled vocabulary, (as required by the VOTable specification version 1.2), otherwise not. has_colon : bool, optional If `True`, the UCD may contain a colon (as defined in earlier versions of the standard). Returns ------- parts : list The result is a list of tuples of the form: (*namespace*, *word*) If no namespace was explicitly specified, *namespace* will be returned as ``'ivoa'`` (i.e., the default namespace). 
Raises ------ ValueError : *ucd* is invalid """ global _ucd_singleton if _ucd_singleton is None: _ucd_singleton = UCDWords() if has_colon: m = re.search(r'[^A-Za-z0-9_.:;\-]', ucd) else: m = re.search(r'[^A-Za-z0-9_.;\-]', ucd) if m is not None: raise ValueError("UCD has invalid character '{}' in '{}'".format( m.group(0), ucd)) word_component_re = r'[A-Za-z0-9][A-Za-z0-9\-_]*' word_re = r'{}(\.{})*'.format(word_component_re, word_component_re) parts = ucd.split(';') words = [] for i, word in enumerate(parts): colon_count = word.count(':') if colon_count == 1: ns, word = word.split(':', 1) if not re.match(word_component_re, ns): raise ValueError("Invalid namespace '{}'".format(ns)) ns = ns.lower() elif colon_count > 1: raise ValueError("Too many colons in '{}'".format(word)) else: ns = 'ivoa' if not re.match(word_re, word): raise ValueError("Invalid word '{}'".format(word)) if ns == 'ivoa' and check_controlled_vocabulary: if i == 0: if not _ucd_singleton.is_primary(word): if _ucd_singleton.is_secondary(word): raise ValueError( "Secondary word '{}' is not valid as a primary " "word".format(word)) else: raise ValueError("Unknown word '{}'".format(word)) else: if not _ucd_singleton.is_secondary(word): if _ucd_singleton.is_primary(word): raise ValueError( "Primary word '{}' is not valid as a secondary " "word".format(word)) else: raise ValueError("Unknown word '{}'".format(word)) try: normalized_word = _ucd_singleton.normalize_capitalization(word) except KeyError: normalized_word = word words.append((ns, normalized_word)) return words def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False): """ Returns False if *ucd* is not a valid `unified content descriptor`_. Parameters ---------- ucd : str The UCD string check_controlled_vocabulary : bool, optional If `True`, then each word in the UCD will be verified against the UCD1+ controlled vocabulary, (as required by the VOTable specification version 1.2), otherwise not. has_colon : bool, optional If `True`, the UCD may contain a colon (as defined in earlier versions of the standard). Returns ------- valid : bool """ if ucd is None: return True try: parse_ucd(ucd, check_controlled_vocabulary=check_controlled_vocabulary, has_colon=has_colon) except ValueError: return False return True
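# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module):
# parse_ucd() and check_ucd() applied to a typical, spec-conformant UCD1+
# string; the values are common examples rather than data from any
# particular VOTable file.
if __name__ == '__main__':
    from astropy.io.votable.ucd import parse_ucd, check_ucd

    ucd = 'pos.eq.ra;meta.main'
    print(check_ucd(ucd, check_controlled_vocabulary=True))   # expected: True
    print(parse_ucd(ucd, check_controlled_vocabulary=True))
    # expected: [('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')]

    # A word missing from the controlled vocabulary fails the check.
    print(check_ucd('not.a.real.word', check_controlled_vocabulary=True))
    # expected: False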
50f1a9a4f6804d57a11ba203602a49990c918f6bf0f14c05ef4d9885a88a2527
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from distutils.core import Extension
from os.path import join


def get_extensions(build_type='release'):
    VO_DIR = 'astropy/io/votable/src'

    return [Extension(
        "astropy.io.votable.tablewriter",
        [join(VO_DIR, "tablewriter.c")],
        include_dirs=[VO_DIR])]


def get_package_data():
    return {
        'astropy.io.votable': [
            'data/ucd1p-words.txt', 'data/*.xsd', 'data/*.dtd'],
        'astropy.io.votable.tests': [
            'data/*.xml', 'data/*.gz', 'data/*.json', 'data/*.fits',
            'data/*.txt'],
        'astropy.io.votable.validator': [
            'urls/*.dat.gz']}
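# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): these
# hooks are normally invoked by astropy's build machinery, but they are
# plain functions and can be inspected directly.
if __name__ == '__main__':
    print(get_extensions()[0].name)     # astropy.io.votable.tablewriter
    print(sorted(get_package_data()))   # the sub-packages carrying data files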
f65abf26743becc6a06b4b9efe3f59b28eac5891db861d6569e1fb886df587ff
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Various utilities and cookbook-like things. """ # STDLIB import codecs import contextlib import io import re import gzip from distutils import version __all__ = [ 'convert_to_writable_filelike', 'stc_reference_frames', 'coerce_range_list_param', ] @contextlib.contextmanager def convert_to_writable_filelike(fd, compressed=False): """ Returns a writable file-like object suitable for streaming output. Parameters ---------- fd : file path string or writable file-like object May be: - a file path, in which case it is opened, and the file object is returned. - an object with a :meth:``write`` method, in which case that object is returned. compressed : bool, optional If `True`, create a gzip-compressed file. (Default is `False`). Returns ------- fd : writable file-like object """ if isinstance(fd, str): if fd.endswith('.gz') or compressed: with gzip.GzipFile(fd, 'wb') as real_fd: encoded_fd = io.TextIOWrapper(real_fd, encoding='utf8') yield encoded_fd encoded_fd.flush() real_fd.flush() return else: with open(fd, 'wt', encoding='utf8') as real_fd: yield real_fd return elif hasattr(fd, 'write'): assert callable(fd.write) if compressed: fd = gzip.GzipFile(fileobj=fd) # If we can't write Unicode strings, use a codecs.StreamWriter # object needs_wrapper = False try: fd.write('') except TypeError: needs_wrapper = True if not hasattr(fd, 'encoding') or fd.encoding is None: needs_wrapper = True if needs_wrapper: yield codecs.getwriter('utf-8')(fd) fd.flush() else: yield fd fd.flush() return else: raise TypeError("Can not be coerced to writable file-like object") # <http://www.ivoa.net/Documents/REC/DM/STC-20071030.html> stc_reference_frames = set([ 'FK4', 'FK5', 'ECLIPTIC', 'ICRS', 'GALACTIC', 'GALACTIC_I', 'GALACTIC_II', 'SUPER_GALACTIC', 'AZ_EL', 'BODY', 'GEO_C', 'GEO_D', 'MAG', 'GSE', 'GSM', 'SM', 'HGC', 'HGS', 'HEEQ', 'HRTN', 'HPC', 'HPR', 'HCC', 'HGI', 'MERCURY_C', 'VENUS_C', 'LUNA_C', 'MARS_C', 'JUPITER_C_III', 'SATURN_C_III', 'URANUS_C_III', 'NEPTUNE_C_III', 'PLUTO_C', 'MERCURY_G', 'VENUS_G', 'LUNA_G', 'MARS_G', 'JUPITER_G_III', 'SATURN_G_III', 'URANUS_G_III', 'NEPTUNE_G_III', 'PLUTO_G', 'UNKNOWNFrame']) def coerce_range_list_param(p, frames=None, numeric=True): """ Coerces and/or verifies the object *p* into a valid range-list-format parameter. As defined in `Section 8.7.2 of Simple Spectral Access Protocol <http://www.ivoa.net/Documents/REC/DAL/SSA-20080201.html>`_. Parameters ---------- p : str or sequence May be a string as passed verbatim to the service expecting a range-list, or a sequence. If a sequence, each item must be either: - a numeric value - a named value, such as, for example, 'J' for named spectrum (if the *numeric* kwarg is False) - a 2-tuple indicating a range - the last item my be a string indicating the frame of reference frames : sequence of str, optional A sequence of acceptable frame of reference keywords. If not provided, the default set in ``set_reference_frames`` will be used. 
numeric : bool, optional TODO Returns ------- parts : tuple The result is a tuple: - a string suitable for passing to a service as a range-list argument - an integer counting the number of elements """ def str_or_none(x): if x is None: return '' if numeric: x = float(x) return str(x) def numeric_or_range(x): if isinstance(x, tuple) and len(x) == 2: return '{}/{}'.format(str_or_none(x[0]), str_or_none(x[1])) else: return str_or_none(x) def is_frame_of_reference(x): return isinstance(x, str) if p is None: return None, 0 elif isinstance(p, (tuple, list)): has_frame_of_reference = len(p) > 1 and is_frame_of_reference(p[-1]) if has_frame_of_reference: points = p[:-1] else: points = p[:] out = ','.join([numeric_or_range(x) for x in points]) length = len(points) if has_frame_of_reference: if frames is not None and p[-1] not in frames: raise ValueError( "'{}' is not a valid frame of reference".format(p[-1])) out += ';' + p[-1] length += 1 return out, length elif isinstance(p, str): number = r'([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)?' if not numeric: number = r'(' + number + ')|([A-Z_]+)' match = re.match( '^' + number + r'([,/]' + number + r')+(;(?P<frame>[<A-Za-z_0-9]+))?$', p) if match is None: raise ValueError("'{}' is not a valid range list".format(p)) frame = match.groupdict()['frame'] if frames is not None and frame is not None and frame not in frames: raise ValueError( "'{}' is not a valid frame of reference".format(frame)) return p, p.count(',') + p.count(';') + 1 try: float(p) return str(p), 1 except TypeError: raise ValueError("'{}' is not a valid range list".format(p)) def version_compare(a, b): """ Compare two VOTable version identifiers. """ def version_to_tuple(v): if v[0].lower() == 'v': v = v[1:] return version.StrictVersion(v) av = version_to_tuple(a) bv = version_to_tuple(b) # Can't use cmp because it was removed from Python 3.x return (av > bv) - (av < bv)
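# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): small
# demonstrations of the helpers above.  The output path 'demo.txt.gz' is
# hypothetical and is gzip-compressed purely to exercise the ``compressed``
# branch of convert_to_writable_filelike().
if __name__ == '__main__':
    from astropy.io.votable.util import (
        convert_to_writable_filelike, coerce_range_list_param,
        stc_reference_frames, version_compare)

    # Stream text into a gzip-compressed file.
    with convert_to_writable_filelike('demo.txt.gz', compressed=True) as fd:
        fd.write('hello VO\n')

    # Coerce a Python sequence into SSAP range-list syntax.
    print(coerce_range_list_param([1.0, (2.0, 4.5), 'ICRS'],
                                  frames=stc_reference_frames))
    # expected: ('1.0,2.0/4.5;ICRS', 3)

    # Compare VOTable version strings (a leading 'v' is tolerated).
    print(version_compare('1.1', 'v1.3'))   # expected: -1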
59a06b68d217ea62af5ffeb1129761d4942403a441bf88d32aa8e67b488da767
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Various XML-related utilities """ # ASTROPY from ...logger import log from ...utils import data from ...utils.xml import check as xml_check from ...utils.xml import validate # LOCAL from .exceptions import (warn_or_raise, vo_warn, W02, W03, W04, W05) __all__ = [ 'check_id', 'fix_id', 'check_token', 'check_mime_content_type', 'check_anyuri', 'validate_schema' ] def check_id(ID, name='ID', config=None, pos=None): """ Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *ID* is not a valid XML ID_. *name* is the name of the attribute being checked (used only for error messages). """ if (ID is not None and not xml_check.check_id(ID)): warn_or_raise(W02, W02, (name, ID), config, pos) return False return True def fix_id(ID, config=None, pos=None): """ Given an arbitrary string, create one that can be used as an xml id. This is rather simplistic at the moment, since it just replaces non-valid characters with underscores. """ if ID is None: return None corrected = xml_check.fix_id(ID) if corrected != ID: vo_warn(W03, (ID, corrected), config, pos) return corrected _token_regex = r"(?![\r\l\t ])[^\r\l\t]*(?![\r\l\t ])" def check_token(token, attr_name, config=None, pos=None): """ Raises a `ValueError` if *token* is not a valid XML token. As defined by XML Schema Part 2. """ if (token is not None and not xml_check.check_token(token)): return False return True def check_mime_content_type(content_type, config=None, pos=None): """ Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *content_type* is not a valid MIME content type. As defined by RFC 2045 (syntactically, at least). """ if (content_type is not None and not xml_check.check_mime_content_type(content_type)): warn_or_raise(W04, W04, content_type, config, pos) return False return True def check_anyuri(uri, config=None, pos=None): """ Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if *uri* is not a valid URI. As defined in RFC 2396. """ if (uri is not None and not xml_check.check_anyuri(uri)): warn_or_raise(W05, W05, uri, config, pos) return False return True def validate_schema(filename, version='1.1'): """ Validates the given file against the appropriate VOTable schema. Parameters ---------- filename : str The path to the XML file to validate version : str, optional The VOTABLE version to check, which must be a string \"1.0\", \"1.1\", \"1.2\" or \"1.3\". If it is not one of these, version \"1.1\" is assumed. For version \"1.0\", it is checked against a DTD, since that version did not have an XML Schema. Returns ------- returncode, stdout, stderr : int, str, str Returns the returncode from xmllint and the stdout and stderr as strings """ if version not in ('1.0', '1.1', '1.2', '1.3'): log.info('{0} has version {1}, using schema 1.1'.format( filename, version)) version = '1.1' if version in ('1.1', '1.2', '1.3'): schema_path = data.get_pkg_data_filename( 'data/VOTable.v{0}.xsd'.format(version)) else: schema_path = data.get_pkg_data_filename( 'data/VOTable.dtd') return validate.validate_schema(filename, schema_path)
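# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module):
# check_id() and fix_id() behaviour on a valid and an invalid XML ID.  The
# warnings emitted for the invalid ID are W02/W03, as wired up above.
if __name__ == '__main__':
    import warnings as _warnings

    from astropy.io.votable.xmlutil import check_id, fix_id

    print(check_id('col_1'))        # expected: True, already a valid XML id

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter('always')
        ok = check_id('2col', name='ID')
        fixed = fix_id('2col')

    print(ok, fixed)                # False and a cleaned-up id such as '_2col'
    print(len(caught) >= 1)         # at least one VO warning was emitted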
0483fc3144978bb0c128368212a642fead91345185a69ed20e5fdb90cb8e5dfe
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ ``fitsheader`` is a command line script based on astropy.io.fits for printing the header(s) of one or more FITS file(s) to the standard output in a human- readable format. Example uses of fitsheader: 1. Print the header of all the HDUs of a .fits file:: $ fitsheader filename.fits 2. Print the header of the third and fifth HDU extension:: $ fitsheader --extension 3 --extension 5 filename.fits 3. Print the header of a named extension, e.g. select the HDU containing keywords EXTNAME='SCI' and EXTVER='2':: $ fitsheader --extension "SCI,2" filename.fits 4. Print only specific keywords:: $ fitsheader --keyword BITPIX --keyword NAXIS filename.fits 5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard:: $ fitsheader --keyword NAXIS* filename.fits 6. Dump the header keywords of all the files in the current directory into a machine-readable csv file:: $ fitsheader --table ascii.csv *.fits > keywords.csv Note that compressed images (HDUs of type :class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real BINTABLE header to describe the compressed data, and a fake IMAGE header representing the image that was compressed. Astropy returns the latter by default. You must supply the ``--compressed`` option if you require the real header that describes the compression. With Astropy installed, please run ``fitsheader --help`` to see the full usage documentation. """ import sys from ... import fits from .... import log class ExtensionNotFoundException(Exception): """Raised if an HDU extension requested by the user does not exist.""" pass class HeaderFormatter: """Class to format the header(s) of a FITS file for display by the `fitsheader` tool; essentially a wrapper around a `HDUList` object. Example usage: fmt = HeaderFormatter('/path/to/file.fits') print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX'])) Parameters ---------- filename : str Path to a single FITS file. Raises ------ OSError If `filename` does not exist or cannot be read. """ def __init__(self, filename): self.filename = filename self._hdulist = fits.open(filename) def parse(self, extensions=None, keywords=None, compressed=False): """Returns the FITS file header(s) in a readable format. Parameters ---------- extensions : list of int or str, optional Format only specific HDU(s), identified by number or name. The name can be composed of the "EXTNAME" or "EXTNAME,EXTVER" keywords. keywords : list of str, optional Keywords for which the value(s) should be returned. If not specified, then the entire header is returned. compressed : boolean, optional If True, shows the header describing the compression, rather than the header obtained after decompression. (Affects FITS files containing `CompImageHDU` extensions only.) Returns ------- formatted_header : str or astropy.table.Table Traditional 80-char wide format in the case of `HeaderFormatter`; an Astropy Table object in the case of `TableHeaderFormatter`. 
""" # `hdukeys` will hold the keys of the HDUList items to display if extensions is None: hdukeys = range(len(self._hdulist)) # Display all by default else: hdukeys = [] for ext in extensions: try: # HDU may be specified by number hdukeys.append(int(ext)) except ValueError: # The user can specify "EXTNAME" or "EXTNAME,EXTVER" parts = ext.split(',') if len(parts) > 1: extname = ','.join(parts[0:-1]) extver = int(parts[-1]) hdukeys.append((extname, extver)) else: hdukeys.append(ext) # Having established which HDUs the user wants, we now format these: return self._parse_internal(hdukeys, keywords, compressed) def _parse_internal(self, hdukeys, keywords, compressed): """The meat of the formatting; in a separate method to allow overriding. """ result = [] for idx, hdu in enumerate(hdukeys): try: cards = self._get_cards(hdu, keywords, compressed) except ExtensionNotFoundException: continue if idx > 0: # Separate HDUs by a blank line result.append('\n') result.append('# HDU {} in {}:\n'.format(hdu, self.filename)) for c in cards: result.append('{}\n'.format(c)) return ''.join(result) def _get_cards(self, hdukey, keywords, compressed): """Returns a list of `astropy.io.fits.card.Card` objects. This function will return the desired header cards, taking into account the user's preference to see the compressed or uncompressed version. Parameters ---------- hdukey : int or str Key of a single HDU in the HDUList. keywords : list of str, optional Keywords for which the cards should be returned. compressed : boolean, optional If True, shows the header describing the compression. Raises ------ ExtensionNotFoundException If the hdukey does not correspond to an extension. """ # First we obtain the desired header try: if compressed: # In the case of a compressed image, return the header before # decompression (not the default behavior) header = self._hdulist[hdukey]._header else: header = self._hdulist[hdukey].header except (IndexError, KeyError): message = '{0}: Extension {1} not found.'.format(self.filename, hdukey) log.warning(message) raise ExtensionNotFoundException(message) if not keywords: # return all cards cards = header.cards else: # specific keywords are requested cards = [] for kw in keywords: try: crd = header.cards[kw] if isinstance(crd, fits.card.Card): # Single card cards.append(crd) else: # Allow for wildcard access cards.extend(crd) except KeyError as e: # Keyword does not exist log.warning('{filename} (HDU {hdukey}): ' 'Keyword {kw} not found.'.format( filename=self.filename, hdukey=hdukey, kw=kw)) return cards class TableHeaderFormatter(HeaderFormatter): """Class to convert the header(s) of a FITS file into a Table object. The table returned by the `parse` method will contain four columns: filename, hdu, keyword, and value. Subclassed from HeaderFormatter, which contains the meat of the formatting. """ def _parse_internal(self, hdukeys, keywords, compressed): """Method called by the parse method in the parent class.""" tablerows = [] for hdu in hdukeys: try: for card in self._get_cards(hdu, keywords, compressed): tablerows.append({'filename': self.filename, 'hdu': hdu, 'keyword': card.keyword, 'value': str(card.value)}) except ExtensionNotFoundException: pass if tablerows: from .... import table return table.Table(tablerows) return None def print_headers_traditional(args): """Prints FITS header(s) using the traditional 80-char format. Parameters ---------- args : argparse.Namespace Arguments passed from the command-line as defined below. 
""" for idx, filename in enumerate(args.filename): # support wildcards if idx > 0 and not args.keywords: print() # print a newline between different files try: formatter = HeaderFormatter(filename) print(formatter.parse(args.extensions, args.keywords, args.compressed), end='') except OSError as e: log.error(str(e)) def print_headers_as_table(args): """Prints FITS header(s) in a machine-readable table format. Parameters ---------- args : argparse.Namespace Arguments passed from the command-line as defined below. """ tables = [] # Create a Table object for each file for filename in args.filename: # Support wildcards try: formatter = TableHeaderFormatter(filename) tbl = formatter.parse(args.extensions, args.keywords, args.compressed) if tbl: tables.append(tbl) except OSError as e: log.error(str(e)) # file not found or unreadable # Concatenate the tables if len(tables) == 0: return False elif len(tables) == 1: resulting_table = tables[0] else: from .... import table resulting_table = table.vstack(tables) # Print the string representation of the concatenated table resulting_table.write(sys.stdout, format=args.table) def main(args=None): """This is the main function called by the `fitsheader` script.""" import argparse parser = argparse.ArgumentParser( description=('Print the header(s) of a FITS file. ' 'Optional arguments allow the desired extension(s), ' 'keyword(s), and output format to be specified. ' 'Note that in the case of a compressed image, ' 'the decompressed header is shown by default.')) parser.add_argument('-e', '--extension', metavar='HDU', action='append', dest='extensions', help='specify the extension by name or number; ' 'this argument can be repeated ' 'to select multiple extensions') parser.add_argument('-k', '--keyword', metavar='KEYWORD', action='append', dest='keywords', help='specify a keyword; this argument can be ' 'repeated to select multiple keywords; ' 'also supports wildcards') parser.add_argument('-t', '--table', nargs='?', default=False, metavar='FORMAT', help='print the header(s) in machine-readable table ' 'format; the default format is ' '"ascii.fixed_width" (can be "ascii.csv", ' '"ascii.html", "ascii.latex", "fits", etc)') parser.add_argument('-c', '--compressed', action='store_true', help='for compressed image data, ' 'show the true header which describes ' 'the compression rather than the data') parser.add_argument('filename', nargs='+', help='path to one or more files; ' 'wildcards are supported') args = parser.parse_args(args) # If `--table` was used but no format specified, # then use ascii.fixed_width by default if args.table is None: args.table = 'ascii.fixed_width' # Now print the desired headers try: if args.table: print_headers_as_table(args) else: print_headers_traditional(args) except OSError as e: # A 'Broken pipe' OSError may occur when stdout is closed prematurely, # eg. when calling `fitsheader file.fits | head`. We let this pass. pass
7e5d474b36ec63d34f2df7ed7e77d71286fedfa9a723a283d41b8105a8ca713f
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ ``fitscheck`` is a command line script based on astropy.io.fits for verifying and updating the CHECKSUM and DATASUM keywords of .fits files. ``fitscheck`` can also detect and often fix other FITS standards violations. ``fitscheck`` facilitates re-writing the non-standard checksums originally generated by astropy.io.fits with standard checksums which will interoperate with CFITSIO. ``fitscheck`` will refuse to write new checksums if the checksum keywords are missing or their values are bad. Use ``--force`` to write new checksums regardless of whether or not they currently exist or pass. Use ``--ignore-missing`` to tolerate missing checksum keywords without comment. Example uses of fitscheck: 1. Add checksums:: $ fitscheck --write *.fits 2. Write new checksums, even if existing checksums are bad or missing:: $ fitscheck --write --force *.fits 3. Verify standard checksums and FITS compliance without changing the files:: $ fitscheck --compliance *.fits 4. Only check and fix compliance problems, ignoring checksums:: $ fitscheck --checksum none --compliance --write *.fits 5. Verify standard interoperable checksums:: $ fitscheck *.fits 6. Delete checksum keywords:: $ fitscheck --checksum remove --write *.fits """ import logging import optparse import os import sys import textwrap from ....tests.helper import catch_warnings from ... import fits log = logging.getLogger('fitscheck') def handle_options(args): if not len(args): args = ['-h'] parser = optparse.OptionParser(usage=textwrap.dedent(""" fitscheck [options] <.fits files...> .e.g. fitscheck example.fits Verifies and optionally re-writes the CHECKSUM and DATASUM keywords for a .fits file. Optionally detects and fixes FITS standard compliance problems. """.strip())) parser.add_option( '-k', '--checksum', dest='checksum_kind', type='choice', choices=['standard', 'remove', 'none'], help='Choose FITS checksum mode or none. Defaults standard.', default='standard', metavar='[standard | remove | none]') parser.add_option( '-w', '--write', dest='write_file', help='Write out file checksums and/or FITS compliance fixes.', default=False, action='store_true') parser.add_option( '-f', '--force', dest='force', help='Do file update even if original checksum was bad.', default=False, action='store_true') parser.add_option( '-c', '--compliance', dest='compliance', help='Do FITS compliance checking; fix if possible.', default=False, action='store_true') parser.add_option( '-i', '--ignore-missing', dest='ignore_missing', help='Ignore missing checksums.', default=False, action='store_true') parser.add_option( '-v', '--verbose', dest='verbose', help='Generate extra output.', default=False, action='store_true') global OPTIONS OPTIONS, fits_files = parser.parse_args(args) if OPTIONS.checksum_kind == 'none': OPTIONS.checksum_kind = False elif OPTIONS.checksum_kind == 'remove': OPTIONS.write_file = True OPTIONS.force = True return fits_files def setup_logging(): if OPTIONS.verbose: log.setLevel(logging.INFO) else: log.setLevel(logging.WARNING) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(message)s')) log.addHandler(handler) def verify_checksums(filename): """ Prints a message if any HDU in `filename` has a bad checksum or datasum. 
""" with catch_warnings() as wlist: with fits.open(filename, checksum=OPTIONS.checksum_kind) as hdulist: for i, hdu in enumerate(hdulist): # looping on HDUs is needed to read them and verify the # checksums if not OPTIONS.ignore_missing: if not hdu._checksum: log.warning('MISSING {!r} .. Checksum not found ' 'in HDU #{}'.format(filename, i)) return 1 if not hdu._datasum: log.warning('MISSING {!r} .. Datasum not found ' 'in HDU #{}'.format(filename, i)) return 1 for w in wlist: if str(w.message).startswith(('Checksum verification failed', 'Datasum verification failed')): log.warning('BAD %r %s', filename, str(w.message)) return 1 log.info('OK {!r}'.format(filename)) return 0 def verify_compliance(filename): """Check for FITS standard compliance.""" with fits.open(filename) as hdulist: try: hdulist.verify('exception') except fits.VerifyError as exc: log.warning('NONCOMPLIANT %r .. %s', filename, str(exc).replace('\n', ' ')) return 1 return 0 def update(filename): """ Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`. Also updates fixes standards violations if possible and requested. """ output_verify = 'silentfix' if OPTIONS.compliance else 'ignore' with fits.open(filename, do_not_scale_image_data=True, checksum=OPTIONS.checksum_kind, mode='update') as hdulist: hdulist.flush(output_verify=output_verify) def process_file(filename): """ Handle a single .fits file, returning the count of checksum and compliance errors. """ try: checksum_errors = verify_checksums(filename) if OPTIONS.compliance: compliance_errors = verify_compliance(filename) else: compliance_errors = 0 if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force: update(filename) return checksum_errors + compliance_errors except Exception as e: log.error('EXCEPTION {!r} .. {}'.format(filename, e)) return 1 def main(args=None): """ Processes command line parameters into options and files, then checks or update FITS DATASUM and CHECKSUM keywords for the specified files. """ errors = 0 fits_files = handle_options(args or sys.argv[1:]) setup_logging() for filename in fits_files: errors += process_file(filename) if errors: log.warning('{} errors'.format(errors)) return int(bool(errors))
2a982a68d7d1406ebb4ca714a5f5f88947b093e32666199bc02d0ca75be7d281
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsinfo`` is a command-line script based on astropy.io.fits for
printing a summary of the HDUs in one or more FITS file(s) to the
standard output.

Example usage of ``fitsinfo``:

1. Print a summary of the HDUs in a FITS file::

    $ fitsinfo filename.fits

    Filename: filename.fits
    No.    Name         Type      Cards   Dimensions   Format
    0    PRIMARY     PrimaryHDU     138   ()
    1    SCI         ImageHDU        61   (800, 800)   int16
    2    SCI         ImageHDU        61   (800, 800)   int16
    3    SCI         ImageHDU        61   (800, 800)   int16
    4    SCI         ImageHDU        61   (800, 800)   int16

2. Print a summary of HDUs of all the FITS files in the current directory::

    $ fitsinfo *.fits
"""

import argparse

import astropy.io.fits as fits
from astropy import log


def fitsinfo(filename):
    """
    Print a summary of the HDUs in a FITS file.

    Parameters
    ----------
    filename : str
        The path to a FITS file.
    """
    try:
        fits.info(filename)
    except OSError as e:
        log.error(str(e))
    return


def main(args=None):
    """The main function called by the `fitsinfo` script."""
    parser = argparse.ArgumentParser(
        description=('Print a summary of the HDUs in one or more FITS files.'))
    parser.add_argument('filename', nargs='+',
                        help='Path to one or more FITS files. '
                             'Wildcards are supported.')
    args = parser.parse_args(args)

    for idx, filename in enumerate(args.filename):
        if idx > 0:
            print()
        fitsinfo(filename)
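

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module; the
# file names are placeholders).  Both the helper and the argument-parsing
# entry point can be called directly from Python.
def _example_usage():  # pragma: no cover
    # Summarize a single file (a thin wrapper around ``fits.info``) ...
    fitsinfo('example.fits')
    # ... or mimic the command line, e.g. ``fitsinfo a.fits b.fits``.
    main(['a.fits', 'b.fits'])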
5e90853120b0d0b6440b8f24648bf398d2cefe5552d8f063ccb93e262def0a60
# Licensed under a 3-clause BSD style license - see LICENSE.rst import glob import logging import optparse import os import sys import textwrap import warnings from ... import fits from ..util import fill from ....utils.exceptions import AstropyDeprecationWarning log = logging.getLogger('fitsdiff') USAGE = """ Compare two FITS image files and report the differences in header keywords and data. fitsdiff [options] filename1 filename2 where filename1 filename2 are the two files to be compared. They may also be wild cards, in such cases, they must be enclosed by double or single quotes, or they may be directory names. If both are directory names, all files in each of the directories will be included; if only one is a directory name, then the directory name will be prefixed to the file name(s) specified by the other argument. for example:: fitsdiff "*.fits" "/machine/data1" will compare all FITS files in the current directory to the corresponding files in the directory /machine/data1. """.strip() EPILOG = """ If the two files are identical within the specified conditions, it will report "No difference is found." If the value(s) of -c and -k takes the form '@filename', list is in the text file 'filename', and each line in that text file contains one keyword. Example ------- fitsdiff -k filename,filtnam1 -n 5 -r 1.e-6 test1.fits test2 This command will compare files test1.fits and test2.fits, report maximum of 5 different pixels values per extension, only report data values larger than 1.e-6 relative to each other, and will neglect the different values of keywords FILENAME and FILTNAM1 (or their very existence). fitsdiff command-line arguments can also be set using the environment variable FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present, each argument present will override the corresponding argument on the command-line unless the --exact option is specified. The FITSDIFF_SETTINGS environment variable exists to make it easier to change the behavior of fitsdiff on a global level, such as in a set of regression tests. """.strip() class HelpFormatter(optparse.TitledHelpFormatter): def format_epilog(self, epilog): return '\n{}\n'.format(fill(epilog, self.width)) def handle_options(argv=None): # This is a callback--less trouble than actually adding a new action type def store_list(option, opt, value, parser): setattr(parser.values, option.dest, []) # Accept either a comma-separated list or a filename (starting with @) # containing a value on each line if value and value[0] == '@': value = value[1:] if not os.path.exists(value): log.warning('{} argument {} does not exist'.format(opt, value)) return try: values = [v.strip() for v in open(value, 'r').readlines()] setattr(parser.values, option.dest, values) except OSError as exc: log.warning('reading {} for {} failed: {}; ignoring this ' 'argument'.format(value, opt, exc)) del exc else: setattr(parser.values, option.dest, [v.strip() for v in value.split(',')]) parser = optparse.OptionParser(usage=USAGE, epilog=EPILOG, formatter=HelpFormatter()) parser.add_option( '-q', '--quiet', action='store_true', help='Produce no output and just return a status code.') parser.add_option( '-n', '--num-diffs', type='int', default=10, dest='numdiffs', metavar='INTEGER', help='Max number of data differences (image pixel or table element) ' 'to report per extension (default %default).') parser.add_option( '-d', '--difference-tolerance', type='float', default=None, dest='tolerance', metavar='NUMBER', help='DEPRECATED. 
Alias for "--relative-tolerance". ' 'Deprecated, provided for backward compatibility (default %default).') parser.add_option( '-r', '--rtol', '--relative-tolerance', type='float', default=None, dest='rtol', metavar='NUMBER', help='The relative tolerance for comparison of two numbers, ' 'specifically two floating point numbers. This applies to data ' 'in both images and tables, and to floating point keyword values ' 'in headers (default %default).') parser.add_option( '-a', '--atol', '--absolute-tolerance', type='float', default=None, dest='atol', metavar='NUMBER', help='The absolute tolerance for comparison of two numbers, ' 'specifically two floating point numbers. This applies to data ' 'in both images and tables, and to floating point keyword values ' 'in headers (default %default).') parser.add_option( '-b', '--no-ignore-blanks', action='store_false', dest='ignore_blanks', default=True, help="Don't ignore trailing blanks (whitespace) in string values. " "Otherwise trailing blanks both in header keywords/values and in " "table column values) are not treated as significant i.e., " "without this option 'ABCDEF ' and 'ABCDEF' are considered " "equivalent. ") parser.add_option( '--no-ignore-blank-cards', action='store_false', dest='ignore_blank_cards', default=True, help="Don't ignore entirely blank cards in headers. Normally fitsdiff " "does not consider blank cards when comparing headers, but this " "will ensure that even blank cards match up. ") parser.add_option( '--exact', action='store_true', dest='exact_comparisons', default=False, help="Report ALL differences, " "overriding command-line options and FITSDIFF_SETTINGS. ") parser.add_option( '-o', '--output-file', metavar='FILE', help='Output results to this file; otherwise results are printed to ' 'stdout.') group = optparse.OptionGroup(parser, 'Header Comparison Options') group.add_option( '-k', '--ignore-keywords', action='callback', callback=store_list, nargs=1, type='str', default=[], dest='ignore_keywords', metavar='KEYWORDS', help='Comma-separated list of keywords not to be compared. Keywords ' 'may contain wildcard patterns. To exclude all keywords, use ' '"*"; make sure to have double or single quotes around the ' 'asterisk on the command-line.') group.add_option( '-c', '--ignore-comments', action='callback', callback=store_list, nargs=1, type='str', default=[], dest='ignore_comments', metavar='KEYWORDS', help='Comma-separated list of keywords whose comments will not be ' 'compared. Wildcards may be used as with --ignore-keywords.') parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Table Comparison Options') group.add_option( '-f', '--ignore-fields', action='callback', callback=store_list, nargs=1, type='str', default=[], dest='ignore_fields', metavar='COLUMNS', help='Comma-separated list of fields (i.e. columns) not to be ' 'compared. 
All columns may be excluded using "*" as with ' '--ignore-keywords.') parser.add_option_group(group) options, args = parser.parse_args(argv) # Determine which filenames to compare if len(args) != 2: parser.error('\n' + textwrap.fill( 'fitsdiff requires two arguments; see `fitsdiff --help` for more ' 'details.', parser.formatter.width)) return options, args def setup_logging(outfile=None): log.setLevel(logging.INFO) error_handler = logging.StreamHandler(sys.stderr) error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) error_handler.setLevel(logging.WARNING) log.addHandler(error_handler) if outfile is not None: output_handler = logging.FileHandler(outfile) else: output_handler = logging.StreamHandler() class LevelFilter(logging.Filter): """Log only messages matching the specified level.""" def __init__(self, name='', level=logging.NOTSET): logging.Filter.__init__(self, name) self.level = level def filter(self, rec): return rec.levelno == self.level # File output logs all messages, but stdout logs only INFO messages # (since errors are already logged to stderr) output_handler.addFilter(LevelFilter(level=logging.INFO)) output_handler.setFormatter(logging.Formatter('%(message)s')) log.addHandler(output_handler) def match_files(paths): if os.path.isfile(paths[0]) and os.path.isfile(paths[1]): # shortcut if both paths are files return [paths] dirnames = [None, None] filelists = [None, None] for i, path in enumerate(paths): if glob.has_magic(path): files = [os.path.split(f) for f in glob.glob(path)] if not files: log.error('Wildcard pattern %r did not match any files.', path) sys.exit(2) dirs, files = list(zip(*files)) if len(set(dirs)) > 1: log.error('Wildcard pattern %r should match only one ' 'directory.', path) sys.exit(2) dirnames[i] = set(dirs).pop() filelists[i] = sorted(files) elif os.path.isdir(path): dirnames[i] = path filelists[i] = sorted(os.listdir(path)) elif os.path.isfile(path): dirnames[i] = os.path.dirname(path) filelists[i] = [os.path.basename(path)] else: log.error( '%r is not an existing file, directory, or wildcard ' 'pattern; see `fitsdiff --help` for more usage help.', path) sys.exit(2) dirnames[i] = os.path.abspath(dirnames[i]) filematch = set(filelists[0]) & set(filelists[1]) for a, b in [(0, 1), (1, 0)]: if len(filelists[a]) > len(filematch) and not os.path.isdir(paths[a]): for extra in sorted(set(filelists[a]) - filematch): log.warning('%r has no match in %r', extra, dirnames[b]) return [(os.path.join(dirnames[0], f), os.path.join(dirnames[1], f)) for f in filematch] def main(args=None): args = args or sys.argv[1:] if 'FITSDIFF_SETTINGS' in os.environ: args = os.environ['FITSDIFF_SETTINGS'].split() + args opts, args = handle_options(args) if opts.tolerance is not None: warnings.warn( '"-d" ("--difference-tolerance") was deprecated in version 2.0 ' 'and will be removed in a future version. 
' 'Use "-r" ("--relative-tolerance") instead.', AstropyDeprecationWarning) opts.rtol = opts.tolerance if opts.rtol is None: opts.rtol = 0.0 if opts.atol is None: opts.atol = 0.0 if opts.exact_comparisons: # override the options so that each is the most restrictive opts.ignore_keywords = [] opts.ignore_comments = [] opts.ignore_fields = [] opts.rtol = 0.0 opts.atol = 0.0 opts.ignore_blanks = False opts.ignore_blank_cards = False if not opts.quiet: setup_logging(opts.output_file) files = match_files(args) close_file = False if opts.quiet: out_file = None elif opts.output_file: out_file = open(opts.output_file, 'w') close_file = True else: out_file = sys.stdout identical = [] try: for a, b in files: # TODO: pass in any additional arguments here too diff = fits.diff.FITSDiff( a, b, ignore_keywords=opts.ignore_keywords, ignore_comments=opts.ignore_comments, ignore_fields=opts.ignore_fields, numdiffs=opts.numdiffs, rtol=opts.rtol, atol=opts.atol, ignore_blanks=opts.ignore_blanks, ignore_blank_cards=opts.ignore_blank_cards) diff.report(fileobj=out_file) identical.append(diff.identical) return int(not all(identical)) finally: if close_file: out_file.close() # Close the file if used for the logging output, and remove handlers to # avoid having them multiple times for unit tests. for handler in log.handlers: if isinstance(handler, logging.FileHandler): handler.close() log.removeHandler(handler)
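

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module; file
# names are placeholders).  The command-line behaviour can be reproduced by
# handing ``main()`` an argv-style list, or ``fits.diff.FITSDiff`` can be used
# directly, which is what ``main()`` does internally for each matched pair.
def _example_usage():  # pragma: no cover
    # Equivalent to:
    #   fitsdiff -k FILENAME,FILTNAM1 -n 5 -r 1e-6 old.fits new.fits
    status = main(['-k', 'FILENAME,FILTNAM1', '-n', '5', '-r', '1e-6',
                   'old.fits', 'new.fits'])

    # The same comparison without the CLI layer:
    diff = fits.diff.FITSDiff('old.fits', 'new.fits',
                              ignore_keywords=['FILENAME', 'FILTNAM1'],
                              numdiffs=5, rtol=1e-6)
    print(diff.report())
    return status, diff.identical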
245a6c95e3e4d932ce501dece403715b72d4ff81a5f4cbc90954b67e246d2f6d
# Licensed under a 3-clause BSD style license - see PYFITS.rst import sys import numpy as np from .base import DTYPE2BITPIX from .image import PrimaryHDU from .table import _TableLikeHDU from ..column import Column, ColDefs, FITS2NUMPY from ..fitsrec import FITS_rec, FITS_record from ..util import _is_int, _is_pseudo_unsigned, _unsigned_zero from ....utils import lazyproperty class Group(FITS_record): """ One group of the random group data. """ def __init__(self, input, row=0, start=None, end=None, step=None, base=None): super().__init__(input, row, start, end, step, base) @property def parnames(self): return self.array.parnames @property def data(self): # The last column in the coldefs is the data portion of the group return self.field(self.array._coldefs.names[-1]) @lazyproperty def _unique(self): return _par_indices(self.parnames) def par(self, parname): """ Get the group parameter value. """ if _is_int(parname): result = self.array[self.row][parname] else: indx = self._unique[parname.upper()] if len(indx) == 1: result = self.array[self.row][indx[0]] # if more than one group parameter have the same name else: result = self.array[self.row][indx[0]].astype('f8') for i in indx[1:]: result += self.array[self.row][i] return result def setpar(self, parname, value): """ Set the group parameter value. """ # TODO: It would be nice if, instead of requiring a multi-part value to # be an array, there were an *option* to automatically split the value # into multiple columns if it doesn't already fit in the array data # type. if _is_int(parname): self.array[self.row][parname] = value else: indx = self._unique[parname.upper()] if len(indx) == 1: self.array[self.row][indx[0]] = value # if more than one group parameter have the same name, the # value must be a list (or tuple) containing arrays else: if isinstance(value, (list, tuple)) and \ len(indx) == len(value): for i in range(len(indx)): self.array[self.row][indx[i]] = value[i] else: raise ValueError('Parameter value must be a sequence with ' '{} arrays/numbers.'.format(len(indx))) class GroupData(FITS_rec): """ Random groups data object. Allows structured access to FITS Group data in a manner analogous to tables. """ _record_type = Group def __new__(cls, input=None, bitpix=None, pardata=None, parnames=[], bscale=None, bzero=None, parbscales=None, parbzeros=None): """ Parameters ---------- input : array or FITS_rec instance input data, either the group data itself (a `numpy.ndarray`) or a record array (`FITS_rec`) which will contain both group parameter info and the data. The rest of the arguments are used only for the first case. bitpix : int data type as expressed in FITS ``BITPIX`` value (8, 16, 32, 64, -32, or -64) pardata : sequence of arrays parameter data, as a list of (numeric) arrays. parnames : sequence of str list of parameter names. 
bscale : int ``BSCALE`` of the data bzero : int ``BZERO`` of the data parbscales : sequence of int list of bscales for the parameters parbzeros : sequence of int list of bzeros for the parameters """ if not isinstance(input, FITS_rec): if pardata is None: npars = 0 else: npars = len(pardata) if parbscales is None: parbscales = [None] * npars if parbzeros is None: parbzeros = [None] * npars if parnames is None: parnames = ['PAR{}'.format(idx + 1) for idx in range(npars)] if len(parnames) != npars: raise ValueError('The number of parameter data arrays does ' 'not match the number of parameters.') unique_parnames = _unique_parnames(parnames + ['DATA']) if bitpix is None: bitpix = DTYPE2BITPIX[input.dtype.name] fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E' format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4' data_fmt = '{}{}'.format(str(input.shape[1:]), format) formats = ','.join(([format] * npars) + [data_fmt]) gcount = input.shape[0] cols = [Column(name=unique_parnames[idx], format=fits_fmt, bscale=parbscales[idx], bzero=parbzeros[idx]) for idx in range(npars)] cols.append(Column(name=unique_parnames[-1], format=fits_fmt, bscale=bscale, bzero=bzero)) coldefs = ColDefs(cols) self = FITS_rec.__new__(cls, np.rec.array(None, formats=formats, names=coldefs.names, shape=gcount)) # By default the data field will just be 'DATA', but it may be # uniquified if 'DATA' is already used by one of the group names self._data_field = unique_parnames[-1] self._coldefs = coldefs self.parnames = parnames for idx, name in enumerate(unique_parnames[:-1]): column = coldefs[idx] # Note: _get_scale_factors is used here and in other cases # below to determine whether the column has non-default # scale/zero factors. # TODO: Find a better way to do this than using this interface scale, zero = self._get_scale_factors(column)[3:5] if scale or zero: self._cache_field(name, pardata[idx]) else: np.rec.recarray.field(self, idx)[:] = pardata[idx] column = coldefs[self._data_field] scale, zero = self._get_scale_factors(column)[3:5] if scale or zero: self._cache_field(self._data_field, input) else: np.rec.recarray.field(self, npars)[:] = input else: self = FITS_rec.__new__(cls, input) self.parnames = None return self def __array_finalize__(self, obj): super().__array_finalize__(obj) if isinstance(obj, GroupData): self.parnames = obj.parnames elif isinstance(obj, FITS_rec): self.parnames = obj._coldefs.names def __getitem__(self, key): out = super().__getitem__(key) if isinstance(out, GroupData): out.parnames = self.parnames return out @property def data(self): """ The raw group data represented as a multi-dimensional `numpy.ndarray` array. """ # The last column in the coldefs is the data portion of the group return self.field(self._coldefs.names[-1]) @lazyproperty def _unique(self): return _par_indices(self.parnames) def par(self, parname): """ Get the group parameter values. """ if _is_int(parname): result = self.field(parname) else: indx = self._unique[parname.upper()] if len(indx) == 1: result = self.field(indx[0]) # if more than one group parameter have the same name else: result = self.field(indx[0]).astype('f8') for i in indx[1:]: result += self.field(i) return result class GroupsHDU(PrimaryHDU, _TableLikeHDU): """ FITS Random Groups HDU class. See the :ref:`random-groups` section in the Astropy documentation for more details on working with this type of HDU. 
""" _bitpix2tform = {8: 'B', 16: 'I', 32: 'J', 64: 'K', -32: 'E', -64: 'D'} _data_type = GroupData _data_field = 'DATA' """ The name of the table record array field that will contain the group data for each group; 'DATA' by default, but may be preceded by any number of underscores if 'DATA' is already a parameter name """ def __init__(self, data=None, header=None): super().__init__(data=data, header=header) # Update the axes; GROUPS HDUs should always have at least one axis if len(self._axes) <= 0: self._axes = [0] self._header['NAXIS'] = 1 self._header.set('NAXIS1', 0, after='NAXIS') @classmethod def match_header(cls, header): keyword = header.cards[0].keyword return (keyword == 'SIMPLE' and 'GROUPS' in header and header['GROUPS'] is True) @lazyproperty def data(self): """ The data of a random group FITS file will be like a binary table's data. """ data = self._get_tbdata() data._coldefs = self.columns data.parnames = self.parnames del self.columns return data @lazyproperty def parnames(self): """The names of the group parameters as described by the header.""" pcount = self._header['PCOUNT'] # The FITS standard doesn't really say what to do if a parname is # missing, so for now just assume that won't happen return [self._header['PTYPE' + str(idx + 1)] for idx in range(pcount)] @lazyproperty def columns(self): if self._has_data and hasattr(self.data, '_coldefs'): return self.data._coldefs format = self._bitpix2tform[self._header['BITPIX']] pcount = self._header['PCOUNT'] parnames = [] bscales = [] bzeros = [] for idx in range(pcount): bscales.append(self._header.get('PSCAL' + str(idx + 1), None)) bzeros.append(self._header.get('PZERO' + str(idx + 1), None)) parnames.append(self._header['PTYPE' + str(idx + 1)]) formats = [format] * len(parnames) dim = [None] * len(parnames) # Now create columns from collected parameters, but first add the DATA # column too, to contain the group data. parnames.append('DATA') bscales.append(self._header.get('BSCALE')) bzeros.append(self._header.get('BZEROS')) data_shape = self.shape[:-1] formats.append(str(int(np.prod(data_shape))) + format) dim.append(data_shape) parnames = _unique_parnames(parnames) self._data_field = parnames[-1] cols = [Column(name=name, format=fmt, bscale=bscale, bzero=bzero, dim=dim) for name, fmt, bscale, bzero, dim in zip(parnames, formats, bscales, bzeros, dim)] coldefs = ColDefs(cols) return coldefs @property def _nrows(self): if not self._data_loaded: # The number of 'groups' equates to the number of rows in the table # representation of the data return self._header.get('GCOUNT', 0) else: return len(self.data) @lazyproperty def _theap(self): # Only really a lazyproperty for symmetry with _TableBaseHDU return 0 @property def is_image(self): return False @property def size(self): """ Returns the size (in bytes) of the HDU's data part. """ size = 0 naxis = self._header.get('NAXIS', 0) # for random group image, NAXIS1 should be 0, so we skip NAXIS1. 
if naxis > 1: size = 1 for idx in range(1, naxis): size = size * self._header['NAXIS' + str(idx + 1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount + size) // 8 return size def update_header(self): old_naxis = self._header.get('NAXIS', 0) if self._data_loaded: if isinstance(self.data, GroupData): self._axes = list(self.data.data.shape)[1:] self._axes.reverse() self._axes = [0] + self._axes field0 = self.data.dtype.names[0] field0_code = self.data.dtype.fields[field0][0].name elif self.data is None: self._axes = [0] field0_code = 'uint8' # For lack of a better default else: raise ValueError('incorrect array type') self._header['BITPIX'] = DTYPE2BITPIX[field0_code] self._header['NAXIS'] = len(self._axes) # add NAXISi if it does not exist for idx, axis in enumerate(self._axes): if (idx == 0): after = 'NAXIS' else: after = 'NAXIS' + str(idx) self._header.set('NAXIS' + str(idx + 1), axis, after=after) # delete extra NAXISi's for idx in range(len(self._axes) + 1, old_naxis + 1): try: del self._header['NAXIS' + str(idx)] except KeyError: pass if self._has_data and isinstance(self.data, GroupData): self._header.set('GROUPS', True, after='NAXIS' + str(len(self._axes))) self._header.set('PCOUNT', len(self.data.parnames), after='GROUPS') self._header.set('GCOUNT', len(self.data), after='PCOUNT') column = self.data._coldefs[self._data_field] scale, zero = self.data._get_scale_factors(column)[3:5] if scale: self._header.set('BSCALE', column.bscale) if zero: self._header.set('BZERO', column.bzero) for idx, name in enumerate(self.data.parnames): self._header.set('PTYPE' + str(idx + 1), name) column = self.data._coldefs[idx] scale, zero = self.data._get_scale_factors(column)[3:5] if scale: self._header.set('PSCAL' + str(idx + 1), column.bscale) if zero: self._header.set('PZERO' + str(idx + 1), column.bzero) # Update the position of the EXTEND keyword if it already exists if 'EXTEND' in self._header: if len(self._axes): after = 'NAXIS' + str(len(self._axes)) else: after = 'NAXIS' self._header.set('EXTEND', after=after) def _writedata_internal(self, fileobj): """ Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but we have to get the data's byte order a different way... TODO: Might be nice to store some indication of the data's byte order as an attribute or function so that we don't have to do this. """ size = 0 if self.data is not None: self.data._scale_back() # Based on the system type, determine the byteorders that # would need to be swapped to get to big-endian output if sys.byteorder == 'little': swap_types = ('<', '=') else: swap_types = ('<',) # deal with unsigned integer 16, 32 and 64 data if _is_pseudo_unsigned(self.data.dtype): # Convert the unsigned array to signed output = np.array( self.data - _unsigned_zero(self.data.dtype), dtype='>i{}'.format(self.data.dtype.itemsize)) should_swap = False else: output = self.data fname = self.data.dtype.names[0] byteorder = self.data.dtype.fields[fname][0].str[0] should_swap = (byteorder in swap_types) if not fileobj.simulateonly: if should_swap: if output.flags.writeable: output.byteswap(True) try: fileobj.writearray(output) finally: output.byteswap(True) else: # For read-only arrays, there is no way around making # a byteswapped copy of the data. 
fileobj.writearray(output.byteswap(False)) else: fileobj.writearray(output) size += output.size * output.itemsize return size def _verify(self, option='warn'): errs = super()._verify(option=option) # Verify locations and values of mandatory keywords. self.req_cards('NAXIS', 2, lambda v: (_is_int(v) and 1 <= v <= 999), 1, option, errs) self.req_cards('NAXIS1', 3, lambda v: (_is_int(v) and v == 0), 0, option, errs) after = self._header['NAXIS'] + 3 pos = lambda x: x >= after self.req_cards('GCOUNT', pos, _is_int, 1, option, errs) self.req_cards('PCOUNT', pos, _is_int, 0, option, errs) self.req_cards('GROUPS', pos, lambda v: (v is True), True, option, errs) return errs def _calculate_datasum(self): """ Calculate the value for the ``DATASUM`` card in the HDU. """ if self._has_data: # We have the data to be used. # Check the byte order of the data. If it is little endian we # must swap it before calculating the datasum. # TODO: Maybe check this on a per-field basis instead of assuming # that all fields have the same byte order? byteorder = \ self.data.dtype.fields[self.data.dtype.names[0]][0].str[0] if byteorder != '>': if self.data.flags.writeable: byteswapped = True d = self.data.byteswap(True) d.dtype = d.dtype.newbyteorder('>') else: # If the data is not writeable, we just make a byteswapped # copy and don't bother changing it back after d = self.data.byteswap(False) d.dtype = d.dtype.newbyteorder('>') byteswapped = False else: byteswapped = False d = self.data byte_data = d.view(type=np.ndarray, dtype=np.ubyte) cs = self._compute_checksum(byte_data) # If the data was byteswapped in this method then return it to # its original little-endian order. if byteswapped: d.byteswap(True) d.dtype = d.dtype.newbyteorder('<') return cs else: # This is the case where the data has not been read from the file # yet. We can handle that in a generic manner so we do it in the # base class. The other possibility is that there is no data at # all. This can also be handled in a generic manner. return super()._calculate_datasum() def _summary(self): summary = super()._summary() name, ver, classname, length, shape, format, gcount = summary # Drop the first axis from the shape if shape: shape = shape[1:] if shape and all(shape): # Update the format format = self.columns[0].dtype.name # Update the GCOUNT report gcount = '{} Groups {} Parameters'.format(self._gcount, self._pcount) return (name, ver, classname, length, shape, format, gcount) def _par_indices(names): """ Given a list of objects, returns a mapping of objects in that list to the index or indices at which that object was found in the list. """ unique = {} for idx, name in enumerate(names): # Case insensitive name = name.upper() if name in unique: unique[name].append(idx) else: unique[name] = [idx] return unique def _unique_parnames(names): """ Given a list of parnames, including possible duplicates, returns a new list of parnames with duplicates prepended by one or more underscores to make them unique. This is also case insensitive. """ upper_names = set() unique_names = [] for name in names: name_upper = name.upper() while name_upper in upper_names: name = '_' + name name_upper = '_' + name_upper unique_names.append(name) upper_names.add(name_upper) return unique_names
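

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# A hedged example of building a random-groups HDU from scratch with the
# classes defined above; the shapes, parameter names and values are made up.
def _example_usage():  # pragma: no cover
    # Three groups, each a 2x2 float32 image with two group parameters.
    imdata = np.zeros((3, 2, 2), dtype=np.float32)
    pdata1 = np.arange(3, dtype=np.float32)
    pdata2 = np.arange(3, dtype=np.float32) * 0.5

    gdata = GroupData(imdata, parnames=['UU', 'VV'],
                      pardata=[pdata1, pdata2], bitpix=-32)
    hdu = GroupsHDU(gdata)

    # Group parameters can be read back by (case-insensitive) name, either
    # for all groups at once or for a single group record.
    print(gdata.par('uu'))
    print(hdu.data[0].par('VV'))
    return hdu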
1dc3797b9236521475ce07edb57be136b335996c0c2a51f1e16327a0c1a3a08d
# Licensed under a 3-clause BSD style license - see PYFITS.rst import contextlib import csv import operator import os import re import sys import textwrap import warnings from contextlib import suppress import numpy as np from numpy import char as chararray from .base import DELAYED, _ValidHDU, ExtensionHDU # This module may have many dependencies on astropy.io.fits.column, but # astropy.io.fits.column has fewer dependencies overall, so it's easier to # keep table/column-related utilities in astropy.io.fits.column from .. import _numpy_hacks as nh from ..column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE, ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs, _AsciiColDefs, _FormatP, _FormatQ, _makep, _parse_tformat, _scalar_to_format, _convert_format, _cmp_recformats, _get_index) from ..fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields from ..header import Header, _pad_length from ..util import _is_int, _str_to_num from ....utils import lazyproperty from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning from ....utils.decorators import deprecated_renamed_argument class FITSTableDumpDialect(csv.excel): """ A CSV dialect for the Astropy format of ASCII dumps of FITS tables. """ delimiter = ' ' lineterminator = '\n' quotechar = '"' quoting = csv.QUOTE_ALL skipinitialspace = True class _TableLikeHDU(_ValidHDU): """ A class for HDUs that have table-like data. This is used for both Binary/ASCII tables as well as Random Access Group HDUs (which are otherwise too dissimilar for tables to use _TableBaseHDU directly). """ _data_type = FITS_rec _columns_type = ColDefs # TODO: Temporary flag representing whether uints are enabled; remove this # after restructuring to support uints by default on a per-column basis _uint = False @classmethod def match_header(cls, header): """ This is an abstract HDU type for HDUs that contain table-like data. This is even more abstract than _TableBaseHDU which is specifically for the standard ASCII and Binary Table types. """ raise NotImplementedError @classmethod def from_columns(cls, columns, header=None, nrows=0, fill=False, character_as_bytes=False, **kwargs): """ Given either a `ColDefs` object, a sequence of `Column` objects, or another table HDU or table data (a `FITS_rec` or multi-field `numpy.ndarray` or `numpy.recarray` object, return a new table HDU of the class this method was called on using the column definition from the input. See also `FITS_rec.from_columns`. Parameters ---------- columns : sequence of `Column`, `ColDefs`, or other The columns from which to create the table data, or an object with a column-like structure from which a `ColDefs` can be instantiated. This includes an existing `BinTableHDU` or `TableHDU`, or a `numpy.recarray` to give some examples. If these columns have data arrays attached that data may be used in initializing the new table. Otherwise the input columns will be used as a template for a new table with the requested number of rows. header : `Header` An optional `Header` object to instantiate the new HDU yet. Header keywords specifically related to defining the table structure (such as the "TXXXn" keywords like TTYPEn) will be overridden by the supplied column definitions, but all other informational and data model-specific keywords are kept. nrows : int Number of rows in the new table. If the input columns have data associated with them, the size of the largest input column is used. Otherwise the default is 0. fill : bool If `True`, will fill all cells with zeros or blanks. 
If `False`, copy the data from input, undefined cells will still be filled with zeros/blanks. character_as_bytes : bool Whether to return bytes for string columns when accessed from the HDU. By default this is `False` and (unicode) strings are returned, but for large tables this may use up a lot of memory. Notes ----- Any additional keyword arguments accepted by the HDU class's ``__init__`` may also be passed in as keyword arguments. """ coldefs = cls._columns_type(columns) data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill, character_as_bytes=character_as_bytes) hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs) coldefs._add_listener(hdu) return hdu @lazyproperty def columns(self): """ The :class:`ColDefs` objects describing the columns in this table. """ # The base class doesn't make any assumptions about where the column # definitions come from, so just return an empty ColDefs return ColDefs([]) @property def _nrows(self): """ Table-like HDUs must provide an attribute that specifies the number of rows in the HDU's table. For now this is an internal-only attribute. """ raise NotImplementedError def _get_tbdata(self): """Get the table data from an input HDU object.""" columns = self.columns # TODO: Details related to variable length arrays need to be dealt with # specifically in the BinTableHDU class, since they're a detail # specific to FITS binary tables if (any(type(r) in (_FormatP, _FormatQ) for r in columns._recformats) and self._data_size is not None and self._data_size > self._theap): # We have a heap; include it in the raw_data raw_data = self._get_raw_data(self._data_size, np.uint8, self._data_offset) data = raw_data[:self._theap].view(dtype=columns.dtype, type=np.rec.recarray) else: raw_data = self._get_raw_data(self._nrows, columns.dtype, self._data_offset) if raw_data is None: # This can happen when a brand new table HDU is being created # and no data has been assigned to the columns, which case just # return an empty array raw_data = np.array([], dtype=columns.dtype) data = raw_data.view(np.rec.recarray) self._init_tbdata(data) data = data.view(self._data_type) columns._add_listener(data) return data def _init_tbdata(self, data): columns = self.columns data.dtype = data.dtype.newbyteorder('>') # hack to enable pseudo-uint support data._uint = self._uint # pass datLoc, for P format data._heapoffset = self._theap data._heapsize = self._header['PCOUNT'] tbsize = self._header['NAXIS1'] * self._header['NAXIS2'] data._gap = self._theap - tbsize # pass the attributes for idx, col in enumerate(columns): # get the data for each column object from the rec.recarray col.array = data.field(idx) # delete the _arrays attribute so that it is recreated to point to the # new data placed in the column object above del columns._arrays def _update_column_added(self, columns, column): """ Update the data upon addition of a new column through the `ColDefs` interface. """ # TODO: It's not clear that this actually works--it probably does not. # This is what the code used to do before introduction of the # notifier interface, but I don't believe it actually worked (there are # several bug reports related to this...) if self._data_loaded: del self.data def _update_column_removed(self, columns, col_idx): """ Update the data upon removal of a column through the `ColDefs` interface. """ # For now this doesn't do anything fancy--it just deletes the data # attribute so that it is forced to be recreated again. 
It doesn't # change anything on the existing data recarray (this is also how this # worked before introducing the notifier interface) if self._data_loaded: del self.data class _TableBaseHDU(ExtensionHDU, _TableLikeHDU): """ FITS table extension base HDU class. Parameters ---------- data : array Data to be used. header : `Header` instance Header to be used. If the ``data`` is also specified, header keywords specifically related to defining the table structure (such as the "TXXXn" keywords like TTYPEn) will be overridden by the supplied column definitions, but all other informational and data model-specific keywords are kept. name : str Name to be populated in ``EXTNAME`` keyword. uint : bool, optional Set to `True` if the table contains unsigned integer columns. ver : int > 0 or None, optional The ver of the HDU, will be the value of the keyword ``EXTVER``. If not given or None, it defaults to the value of the ``EXTVER`` card of the ``header`` or 1. (default: None) character_as_bytes : bool Whether to return bytes for string columns. By default this is `False` and (unicode) strings are returned, but this does not respect memory mapping and loads the whole column in memory when accessed. """ _manages_own_heap = False """ This flag implies that when writing VLA tables (P/Q format) the heap pointers that go into P/Q table columns should not be reordered or rearranged in any way by the default heap management code. This is included primarily as an optimization for compressed image HDUs which perform their own heap maintenance. """ def __init__(self, data=None, header=None, name=None, uint=False, ver=None, character_as_bytes=False): super().__init__(data=data, header=header, name=name, ver=ver) if header is not None and not isinstance(header, Header): raise ValueError('header must be a Header object.') self._uint = uint self._character_as_bytes = character_as_bytes if data is DELAYED: # this should never happen if header is None: raise ValueError('No header to setup HDU.') # if the file is read the first time, no need to copy, and keep it # unchanged else: self._header = header else: # construct a list of cards of minimal header cards = [ ('XTENSION', '', ''), ('BITPIX', 8, 'array data type'), ('NAXIS', 2, 'number of array dimensions'), ('NAXIS1', 0, 'length of dimension 1'), ('NAXIS2', 0, 'length of dimension 2'), ('PCOUNT', 0, 'number of group parameters'), ('GCOUNT', 1, 'number of groups'), ('TFIELDS', 0, 'number of table fields')] if header is not None: # Make a "copy" (not just a view) of the input header, since it # may get modified. the data is still a "view" (for now) hcopy = header.copy(strip=True) cards.extend(hcopy.cards) self._header = Header(cards) if isinstance(data, np.ndarray) and data.dtype.fields is not None: # self._data_type is FITS_rec. if isinstance(data, self._data_type): self.data = data else: self.data = self._data_type.from_columns(data) # TEMP: Special column keywords are normally overwritten by attributes # from Column objects. In Astropy 3.0, several new keywords are now # recognized as being special column keywords, but we don't # automatically clear them yet, as we need to raise a deprecation # warning for at least one major version. 
if header is not None: future_ignore = set() for keyword in self._header.keys(): match = TDEF_RE.match(keyword) try: base_keyword = match.group('label') except Exception: continue # skip if there is no match if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}: future_ignore.add(base_keyword) if future_ignore: keys = ', '.join(x + 'n' for x in sorted(future_ignore)) warnings.warn("The following keywords are now recognized as special " "column-related attributes and should be set via the " "Column objects: {0}. In future, these values will be " "dropped from manually specified headers automatically " "and replaced with values generated based on the " "Column objects.".format(keys), AstropyDeprecationWarning) # TODO: Too much of the code in this class uses header keywords # in making calculations related to the data size. This is # unreliable, however, in cases when users mess with the header # unintentionally--code that does this should be cleaned up. self._header['NAXIS1'] = self.data._raw_itemsize self._header['NAXIS2'] = self.data.shape[0] self._header['TFIELDS'] = len(self.data._coldefs) self.columns = self.data._coldefs self.update() with suppress(TypeError, AttributeError): # Make the ndarrays in the Column objects of the ColDefs # object of the HDU reference the same ndarray as the HDU's # FITS_rec object. for idx, col in enumerate(self.columns): col.array = self.data.field(idx) # Delete the _arrays attribute so that it is recreated to # point to the new data placed in the column objects above del self.columns._arrays elif data is None: pass else: raise TypeError('Table data has incorrect type.') if not (isinstance(self._header[0], str) and self._header[0].rstrip() == self._extension): self._header[0] = (self._extension, self._ext_comment) # Ensure that the correct EXTNAME is set on the new header if one was # created, or that it overrides the existing EXTNAME if different if name: self.name = name if ver is not None: self.ver = ver @classmethod def match_header(cls, header): """ This is an abstract type that implements the shared functionality of the ASCII and Binary Table HDU types, which should be used instead of this. """ raise NotImplementedError @lazyproperty def columns(self): """ The :class:`ColDefs` objects describing the columns in this table. 
""" if self._has_data and hasattr(self.data, '_coldefs'): return self.data._coldefs return self._columns_type(self) @lazyproperty def data(self): data = self._get_tbdata() data._coldefs = self.columns data._character_as_bytes = self._character_as_bytes # Columns should now just return a reference to the data._coldefs del self.columns return data @data.setter def data(self, data): if 'data' in self.__dict__: if self.__dict__['data'] is data: return else: self._data_replaced = True else: self._data_replaced = True self._modified = True if data is None and self.columns: # Create a new table with the same columns, but empty rows formats = ','.join(self.columns._recformats) data = np.rec.array(None, formats=formats, names=self.columns.names, shape=0) if isinstance(data, np.ndarray) and data.dtype.fields is not None: # Go ahead and always make a view, even if the data is already the # correct class (self._data_type) so we can update things like the # column defs, if necessary data = data.view(self._data_type) if not isinstance(data.columns, self._columns_type): # This would be the place, if the input data was for an ASCII # table and this is binary table, or vice versa, to convert the # data to the appropriate format for the table type new_columns = self._columns_type(data.columns) data = FITS_rec.from_columns(new_columns) self.__dict__['data'] = data self.columns = self.data.columns self.update() with suppress(TypeError, AttributeError): # Make the ndarrays in the Column objects of the ColDefs # object of the HDU reference the same ndarray as the HDU's # FITS_rec object. for idx, col in enumerate(self.columns): col.array = self.data.field(idx) # Delete the _arrays attribute so that it is recreated to # point to the new data placed in the column objects above del self.columns._arrays elif data is None: pass else: raise TypeError('Table data has incorrect type.') # returning the data signals to lazyproperty that we've already handled # setting self.__dict__['data'] return data @property def _nrows(self): if not self._data_loaded: return self._header.get('NAXIS2', 0) else: return len(self.data) @lazyproperty def _theap(self): size = self._header['NAXIS1'] * self._header['NAXIS2'] return self._header.get('THEAP', size) # TODO: Need to either rename this to update_header, for symmetry with the # Image HDUs, or just at some point deprecate it and remove it altogether, # since header updates should occur automatically when necessary... def update(self): """ Update header keywords to reflect recent changes of columns. """ self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS') self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1') self._header.set('TFIELDS', len(self.columns), after='GCOUNT') self._clear_table_keywords() self._populate_table_keywords() def copy(self): """ Make a copy of the table HDU, both header and data are copied. 
""" # touch the data, so it's defined (in the case of reading from a # FITS file) return self.__class__(data=self.data.copy(), header=self._header.copy()) def _prewriteto(self, checksum=False, inplace=False): if self._has_data: self.data._scale_back( update_heap_pointers=not self._manages_own_heap) # check TFIELDS and NAXIS2 self._header['TFIELDS'] = len(self.data._coldefs) self._header['NAXIS2'] = self.data.shape[0] # calculate PCOUNT, for variable length tables tbsize = self._header['NAXIS1'] * self._header['NAXIS2'] heapstart = self._header.get('THEAP', tbsize) self.data._gap = heapstart - tbsize pcount = self.data._heapsize + self.data._gap if pcount > 0: self._header['PCOUNT'] = pcount # update the other T****n keywords self._populate_table_keywords() # update TFORM for variable length columns for idx in range(self.data._nfields): format = self.data._coldefs._recformats[idx] if isinstance(format, _FormatP): _max = self.data.field(idx).max # May be either _FormatP or _FormatQ format_cls = format.__class__ format = format_cls(format.dtype, repeat=format.repeat, max=_max) self._header['TFORM' + str(idx + 1)] = format.tform return super()._prewriteto(checksum, inplace) def _verify(self, option='warn'): """ _TableBaseHDU verify method. """ errs = super()._verify(option=option) self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs) self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs) self.req_cards('TFIELDS', 7, lambda v: (_is_int(v) and v >= 0 and v <= 999), 0, option, errs) tfields = self._header['TFIELDS'] for idx in range(tfields): self.req_cards('TFORM' + str(idx + 1), None, None, None, option, errs) return errs def _summary(self): """ Summarize the HDU: name, dimensions, and formats. """ class_name = self.__class__.__name__ # if data is touched, use data info. if self._data_loaded: if self.data is None: shape, format = (), '' nrows = 0 else: nrows = len(self.data) ncols = len(self.columns) format = self.columns.formats # if data is not touched yet, use header info. else: shape = () nrows = self._header['NAXIS2'] ncols = self._header['TFIELDS'] format = ', '.join([self._header['TFORM' + str(j + 1)] for j in range(ncols)]) format = '[{}]'.format(format) dims = "{}R x {}C".format(nrows, ncols) ncards = len(self._header) return (self.name, self.ver, class_name, ncards, dims, format) def _update_column_removed(self, columns, idx): super()._update_column_removed(columns, idx) # Fix the header to reflect the column removal self._clear_table_keywords(index=idx) def _update_column_attribute_changed(self, column, col_idx, attr, old_value, new_value): """ Update the header when one of the column objects is updated. """ # base_keyword is the keyword without the index such as TDIM # while keyword is like TDIM1 base_keyword = ATTRIBUTE_TO_KEYWORD[attr] keyword = base_keyword + str(col_idx + 1) if keyword in self._header: if new_value is None: # If the new value is None, i.e. 
None was assigned to the # column attribute, then treat this as equivalent to deleting # that attribute del self._header[keyword] else: self._header[keyword] = new_value else: keyword_idx = KEYWORD_NAMES.index(base_keyword) # Determine the appropriate keyword to insert this one before/after # if it did not already exist in the header for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]): before_keyword += str(col_idx + 1) if before_keyword in self._header: self._header.insert(before_keyword, (keyword, new_value), after=True) break else: for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]: after_keyword += str(col_idx + 1) if after_keyword in self._header: self._header.insert(after_keyword, (keyword, new_value)) break else: # Just append self._header[keyword] = new_value def _clear_table_keywords(self, index=None): """ Wipe out any existing table definition keywords from the header. If specified, only clear keywords for the given table index (shifting up keywords for any other columns). The index is zero-based. Otherwise keywords for all columns. """ # First collect all the table structure related keyword in the header # into a single list so we can then sort them by index, which will be # useful later for updating the header in a sensible order (since the # header *might* not already be written in a reasonable order) table_keywords = [] for idx, keyword in enumerate(self._header.keys()): match = TDEF_RE.match(keyword) try: base_keyword = match.group('label') except Exception: continue # skip if there is no match if base_keyword in KEYWORD_TO_ATTRIBUTE: # TEMP: For Astropy 3.0 we don't clear away the following keywords # as we are first raising a deprecation warning that these will be # dropped automatically if they were specified in the header. We # can remove this once we are happy to break backward-compatibility if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}: continue num = int(match.group('num')) - 1 # convert to zero-base table_keywords.append((idx, match.group(0), base_keyword, num)) # First delete rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0), reverse=True) for idx, keyword, _, num in rev_sorted_idx_0: if index is None or index == num: del self._header[idx] # Now shift up remaining column keywords if only one column was cleared if index is not None: sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3)) for _, keyword, base_keyword, num in sorted_idx_3: if num <= index: continue old_card = self._header.cards[keyword] new_card = (base_keyword + str(num), old_card.value, old_card.comment) self._header.insert(keyword, new_card) del self._header[keyword] # Also decrement TFIELDS if 'TFIELDS' in self._header: self._header['TFIELDS'] -= 1 def _populate_table_keywords(self): """Populate the new table definition keywords from the header.""" for idx, column in enumerate(self.columns): for keyword, attr in KEYWORD_TO_ATTRIBUTE.items(): val = getattr(column, attr) if val is not None: keyword = keyword + str(idx + 1) self._header[keyword] = val class TableHDU(_TableBaseHDU): """ FITS ASCII table extension HDU class. Parameters ---------- data : array or `FITS_rec` Data to be used. header : `Header` Header to be used. name : str Name to be populated in ``EXTNAME`` keyword. ver : int > 0 or None, optional The ver of the HDU, will be the value of the keyword ``EXTVER``. If not given or None, it defaults to the value of the ``EXTVER`` card of the ``header`` or 1. 
(default: None) character_as_bytes : bool Whether to return bytes for string columns. By default this is `False` and (unicode) strings are returned, but this does not respect memory mapping and loads the whole column in memory when accessed. """ _extension = 'TABLE' _ext_comment = 'ASCII table extension' _padding_byte = ' ' _columns_type = _AsciiColDefs __format_RE = re.compile( r'(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?') def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False): super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes) @classmethod def match_header(cls, header): card = header.cards[0] xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() return card.keyword == 'XTENSION' and xtension == cls._extension def _get_tbdata(self): columns = self.columns names = [n for idx, n in enumerate(columns.names)] # determine if there are duplicate field names and if there # are throw an exception dup = np.rec.find_duplicate(names) if dup: raise ValueError("Duplicate field names: {}".format(dup)) # TODO: Determine if this extra logic is necessary--I feel like the # _AsciiColDefs class should be responsible for telling the table what # its dtype should be... itemsize = columns.spans[-1] + columns.starts[-1] - 1 dtype = {} for idx in range(len(columns)): data_type = 'S' + str(columns.spans[idx]) if idx == len(columns) - 1: # The last column is padded out to the value of NAXIS1 if self._header['NAXIS1'] > itemsize: data_type = 'S' + str(columns.spans[idx] + self._header['NAXIS1'] - itemsize) dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1) raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset) data = raw_data.view(np.rec.recarray) self._init_tbdata(data) return data.view(self._data_type) def _calculate_datasum(self): """ Calculate the value for the ``DATASUM`` card in the HDU. """ if self._has_data: # We have the data to be used. # We need to pad the data to a block length before calculating # the datasum. bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte) padding = np.frombuffer(_pad_length(self.size) * b' ', dtype=np.ubyte) d = np.append(bytes_array, padding) cs = self._compute_checksum(d) return cs else: # This is the case where the data has not been read from the file # yet. We can handle that in a generic manner so we do it in the # base class. The other possibility is that there is no data at # all. This can also be handled in a generic manner. return super()._calculate_datasum() def _verify(self, option='warn'): """ `TableHDU` verify method. """ errs = super()._verify(option=option) self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs) tfields = self._header['TFIELDS'] for idx in range(tfields): self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option, errs) return errs class BinTableHDU(_TableBaseHDU): """ Binary table HDU class. Parameters ---------- data : array, `FITS_rec`, or `~astropy.table.Table` Data to be used. header : `Header` Header to be used. name : str Name to be populated in ``EXTNAME`` keyword. uint : bool, optional Set to `True` if the table contains unsigned integer columns. ver : int > 0 or None, optional The ver of the HDU, will be the value of the keyword ``EXTVER``. If not given or None, it defaults to the value of the ``EXTVER`` card of the ``header`` or 1. (default: None) character_as_bytes : bool Whether to return bytes for string columns. 
By default this is `False` and (unicode) strings are returned, but this does not respect memory mapping and loads the whole column in memory when accessed. """ _extension = 'BINTABLE' _ext_comment = 'binary table extension' def __init__(self, data=None, header=None, name=None, uint=False, ver=None, character_as_bytes=False): from ....table import Table if isinstance(data, Table): from ..convenience import table_to_hdu hdu = table_to_hdu(data) if header is not None: hdu.header.update(header) data = hdu.data header = hdu.header super().__init__(data, header, name=name, uint=uint, ver=ver, character_as_bytes=character_as_bytes) @classmethod def match_header(cls, header): card = header.cards[0] xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() return (card.keyword == 'XTENSION' and xtension in (cls._extension, 'A3DTABLE')) def _calculate_datasum_with_heap(self): """ Calculate the value for the ``DATASUM`` card given the input data """ with _binary_table_byte_swap(self.data) as data: dout = data.view(type=np.ndarray, dtype=np.ubyte) csum = self._compute_checksum(dout) # Now add in the heap data to the checksum (we can skip any gap # between the table and the heap since it's all zeros and doesn't # contribute to the checksum # TODO: The following code may no longer be necessary since it is # now possible to get a pointer directly to the heap data as a # whole. That said, it is possible for the heap section to contain # data that is not actually pointed to by the table (i.e. garbage; # this *shouldn't* happen but it is not disallowed either)--need to # double check whether or not the checksum should include such # garbage for idx in range(data._nfields): if isinstance(data.columns._recformats[idx], _FormatP): for coldata in data.field(idx): # coldata should already be byteswapped from the call # to _binary_table_byte_swap if not len(coldata): continue csum = self._compute_checksum(coldata, csum) return csum def _calculate_datasum(self): """ Calculate the value for the ``DATASUM`` card in the HDU. """ if self._has_data: # This method calculates the datasum while incorporating any # heap data, which is obviously not handled from the base # _calculate_datasum return self._calculate_datasum_with_heap() else: # This is the case where the data has not been read from the file # yet. We can handle that in a generic manner so we do it in the # base class. The other possibility is that there is no data at # all. This can also be handled in a generic manner. 
return super()._calculate_datasum() def _writedata_internal(self, fileobj): size = 0 if self.data is None: return size with _binary_table_byte_swap(self.data) as data: if _has_unicode_fields(data): # If the raw data was a user-supplied recarray, we can't write # unicode columns directly to the file, so we have to switch # to a slower row-by-row write self._writedata_by_row(fileobj) else: fileobj.writearray(data) # write out the heap of variable length array columns this has # to be done after the "regular" data is written (above) fileobj.write((data._gap * '\0').encode('ascii')) nbytes = data._gap if not self._manages_own_heap: # Write the heap data one column at a time, in the order # that the data pointers appear in the column (regardless # if that data pointer has a different, previous heap # offset listed) for idx in range(data._nfields): if not isinstance(data.columns._recformats[idx], _FormatP): continue field = self.data.field(idx) for row in field: if len(row) > 0: nbytes += row.nbytes if not fileobj.simulateonly: fileobj.writearray(row) else: heap_data = data._get_heap_data() if len(heap_data) > 0: nbytes += len(heap_data) if not fileobj.simulateonly: fileobj.writearray(heap_data) data._heapsize = nbytes - data._gap size += nbytes size += self.data.size * self.data._raw_itemsize return size def _writedata_by_row(self, fileobj): fields = [self.data.field(idx) for idx in range(len(self.data.columns))] # Creating Record objects is expensive (as in # `for row in self.data:` so instead we just iterate over the row # indices and get one field at a time: for idx in range(len(self.data)): for field in fields: item = field[idx] field_width = None if field.dtype.kind == 'U': # Read the field *width* by reading past the field kind. i = field.dtype.str.index(field.dtype.kind) field_width = int(field.dtype.str[i+1:]) item = np.char.encode(item, 'ascii') fileobj.writearray(item) if field_width is not None: j = item.dtype.str.index(item.dtype.kind) item_length = int(item.dtype.str[j+1:]) # Fix padding problem (see #5296). padding = '\x00'*(field_width - item_length) fileobj.write(padding.encode('ascii')) _tdump_file_format = textwrap.dedent(""" - **datafile:** Each line of the data file represents one row of table data. The data is output one column at a time in column order. If a column contains an array, each element of the column array in the current row is output before moving on to the next column. Each row ends with a new line. Integer data is output right-justified in a 21-character field followed by a blank. Floating point data is output right justified using 'g' format in a 21-character field with 15 digits of precision, followed by a blank. String data that does not contain whitespace is output left-justified in a field whose width matches the width specified in the ``TFORM`` header parameter for the column, followed by a blank. When the string data contains whitespace characters, the string is enclosed in quotation marks (``""``). For the last data element in a row, the trailing blank in the field is replaced by a new line character. For column data containing variable length arrays ('P' format), the array data is preceded by the string ``'VLA_Length= '`` and the integer length of the array for that row, left-justified in a 21-character field, followed by a blank. .. note:: This format does *not* support variable length arrays using the ('Q' format) due to difficult to overcome ambiguities. 
What this means is that this file format cannot support VLA columns in tables stored in files that are over 2 GB in size. For column data representing a bit field ('X' format), each bit value in the field is output right-justified in a 21-character field as 1 (for true) or 0 (for false). - **cdfile:** Each line of the column definitions file provides the definitions for one column in the table. The line is broken up into 8, sixteen-character fields. The first field provides the column name (``TTYPEn``). The second field provides the column format (``TFORMn``). The third field provides the display format (``TDISPn``). The fourth field provides the physical units (``TUNITn``). The fifth field provides the dimensions for a multidimensional array (``TDIMn``). The sixth field provides the value that signifies an undefined value (``TNULLn``). The seventh field provides the scale factor (``TSCALn``). The eighth field provides the offset value (``TZEROn``). A field value of ``""`` is used to represent the case where no value is provided. - **hfile:** Each line of the header parameters file provides the definition of a single HDU header card as represented by the card image. """) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False): """ Dump the table HDU to a file in ASCII format. The table may be dumped in three separate files, one containing column definitions, one containing header parameters, and one for table data. Parameters ---------- datafile : file path, file object or file-like object, optional Output data file. The default is the root name of the fits file associated with this HDU appended with the extension ``.txt``. cdfile : file path, file object or file-like object, optional Output column definitions file. The default is `None`, no column definitions output is produced. hfile : file path, file object or file-like object, optional Output header parameters file. The default is `None`, no header parameters output is produced. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. Notes ----- The primary use for the `dump` method is to allow viewing and editing the table data and parameters in a standard text editor. The `load` method can be used to create a new table from the three plain text (ASCII) files. """ # check if the output files already exist exist = [] files = [datafile, cdfile, hfile] for f in files: if isinstance(f, str): if os.path.exists(f) and os.path.getsize(f) != 0: if overwrite: warnings.warn( "Overwriting existing file '{}'.".format(f), AstropyUserWarning) os.remove(f) else: exist.append(f) if exist: raise OSError(' '.join(["File '{}' already exists.".format(f) for f in exist])) # Process the data self._dump_data(datafile) # Process the column definitions if cdfile: self._dump_coldefs(cdfile) # Process the header parameters if hfile: self._header.tofile(hfile, sep='\n', endcard=False, padding=False) if isinstance(dump.__doc__, str): dump.__doc__ += _tdump_file_format.replace('\n', '\n ') def load(cls, datafile, cdfile=None, hfile=None, replace=False, header=None): """ Create a table from the input ASCII files. The input is from up to three separate files, one containing column definitions, one containing header parameters, and one containing column data. 
The column definition and header parameters files are not required. When absent the column definitions and/or header parameters are taken from the header object given in the header argument; otherwise sensible defaults are inferred (though this mode is not recommended). Parameters ---------- datafile : file path, file object or file-like object Input data file containing the table data in ASCII format. cdfile : file path, file object, file-like object, optional Input column definition file containing the names, formats, display formats, physical units, multidimensional array dimensions, undefined values, scale factors, and offsets associated with the columns in the table. If `None`, the column definitions are taken from the current values in this object. hfile : file path, file object, file-like object, optional Input parameter definition file containing the header parameter definitions to be associated with the table. If `None`, the header parameter definitions are taken from the current values in this objects header. replace : bool When `True`, indicates that the entire header should be replaced with the contents of the ASCII file instead of just updating the current header. header : Header object When the cdfile and hfile are missing, use this Header object in the creation of the new table and HDU. Otherwise this Header supercedes the keywords from hfile, which is only used to update values not present in this Header, unless ``replace=True`` in which this Header's values are completely replaced with the values from hfile. Notes ----- The primary use for the `load` method is to allow the input of ASCII data that was edited in a standard text editor of the table data and parameters. The `dump` method can be used to create the initial ASCII files. """ # Process the parameter file if header is None: header = Header() if hfile: if replace: header = Header.fromtextfile(hfile) else: header.extend(Header.fromtextfile(hfile), update=True, update_first=True) coldefs = None # Process the column definitions file if cdfile: coldefs = cls._load_coldefs(cdfile) # Process the data file data = cls._load_data(datafile, coldefs) if coldefs is None: coldefs = ColDefs(data) # Create a new HDU using the supplied header and data hdu = cls(data=data, header=header) hdu.columns = coldefs return hdu if isinstance(load.__doc__, str): load.__doc__ += _tdump_file_format.replace('\n', '\n ') load = classmethod(load) # Have to create a classmethod from this here instead of as a decorator; # otherwise we can't update __doc__ def _dump_data(self, fileobj): """ Write the table data in the ASCII format read by BinTableHDU.load() to fileobj. """ if not fileobj and self._file: root = os.path.splitext(self._file.name)[0] fileobj = root + '.txt' close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'w') close_file = True linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect) # Process each row of the table and output one row at a time def format_value(val, format): if format[0] == 'S': itemsize = int(format[1:]) return '{:{size}}'.format(val, size=itemsize) elif format in np.typecodes['AllInteger']: # output integer return '{:21d}'.format(val) elif format in np.typecodes['Complex']: return '{:21.15g}+{:.15g}j'.format(val.real, val.imag) elif format in np.typecodes['Float']: # output floating point return '{:#21.15g}'.format(val) for row in self.data: line = [] # the line for this row of the table # Process each column of the row. 
for column in self.columns: # format of data in a variable length array # where None means it is not a VLA: vla_format = None format = _convert_format(column.format) if isinstance(format, _FormatP): # P format means this is a variable length array so output # the length of the array for this row and set the format # for the VLA data line.append('VLA_Length=') line.append('{:21d}'.format(len(row[column.name]))) _, dtype, option = _parse_tformat(column.format) vla_format = FITS2NUMPY[option[0]][0] if vla_format: # Output the data for each element in the array for val in row[column.name].flat: line.append(format_value(val, vla_format)) else: # The column data is a single element dtype = self.data.dtype.fields[column.name][0] array_format = dtype.char if array_format == 'V': array_format = dtype.base.char if array_format == 'S': array_format += str(dtype.itemsize) if dtype.char == 'V': for value in row[column.name].flat: line.append(format_value(value, array_format)) else: line.append(format_value(row[column.name], array_format)) linewriter.writerow(line) if close_file: fileobj.close() def _dump_coldefs(self, fileobj): """ Write the column definition parameters in the ASCII format read by BinTableHDU.load() to fileobj. """ close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'w') close_file = True # Process each column of the table and output the result to the # file one at a time for column in self.columns: line = [column.name, column.format] attrs = ['disp', 'unit', 'dim', 'null', 'bscale', 'bzero'] line += ['{:16s}'.format(value if value else '""') for value in (getattr(column, attr) for attr in attrs)] fileobj.write(' '.join(line)) fileobj.write('\n') if close_file: fileobj.close() @classmethod def _load_data(cls, fileobj, coldefs=None): """ Read the table data from the ASCII file output by BinTableHDU.dump(). """ close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'r') close_file = True initialpos = fileobj.tell() # We'll be returning here later linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect) # First we need to do some preprocessing on the file to find out how # much memory we'll need to reserve for the table. 
This is necessary # even if we already have the coldefs in order to determine how many # rows to reserve memory for vla_lengths = [] recformats = [] names = [] nrows = 0 if coldefs is not None: recformats = coldefs._recformats names = coldefs.names def update_recformats(value, idx): fitsformat = _scalar_to_format(value) recformat = _convert_format(fitsformat) if idx >= len(recformats): recformats.append(recformat) else: if _cmp_recformats(recformats[idx], recformat) < 0: recformats[idx] = recformat # TODO: The handling of VLAs could probably be simplified a bit for row in linereader: nrows += 1 if coldefs is not None: continue col = 0 idx = 0 while idx < len(row): if row[idx] == 'VLA_Length=': if col < len(vla_lengths): vla_length = vla_lengths[col] else: vla_length = int(row[idx + 1]) vla_lengths.append(vla_length) idx += 2 while vla_length: update_recformats(row[idx], col) vla_length -= 1 idx += 1 col += 1 else: if col >= len(vla_lengths): vla_lengths.append(None) update_recformats(row[idx], col) col += 1 idx += 1 # Update the recformats for any VLAs for idx, length in enumerate(vla_lengths): if length is not None: recformats[idx] = str(length) + recformats[idx] dtype = np.rec.format_parser(recformats, names, None).dtype # TODO: In the future maybe enable loading a bit at a time so that we # can convert from this format to an actual FITS file on disk without # needing enough physical memory to hold the entire thing at once hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype), nrows=nrows, fill=True) # TODO: It seems to me a lot of this could/should be handled from # within the FITS_rec class rather than here. data = hdu.data for idx, length in enumerate(vla_lengths): if length is not None: arr = data.columns._arrays[idx] dt = recformats[idx][len(str(length)):] # NOTE: FormatQ not supported here; it's hard to determine # whether or not it will be necessary to use a wider descriptor # type. The function documentation will have to serve as a # warning that this is not supported. 
recformats[idx] = _FormatP(dt, max=length) data.columns._recformats[idx] = recformats[idx] name = data.columns.names[idx] data._cache_field(name, _makep(arr, arr, recformats[idx])) def format_value(col, val): # Special formatting for a couple particular data types if recformats[col] == FITS2NUMPY['L']: return bool(int(val)) elif recformats[col] == FITS2NUMPY['M']: # For some reason, in arrays/fields where numpy expects a # complex it's not happy to take a string representation # (though it's happy to do that in other contexts), so we have # to convert the string representation for it: return complex(val) else: return val # Jump back to the start of the data and create a new line reader fileobj.seek(initialpos) linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect) for row, line in enumerate(linereader): col = 0 idx = 0 while idx < len(line): if line[idx] == 'VLA_Length=': vla_len = vla_lengths[col] idx += 2 slice_ = slice(idx, idx + vla_len) data[row][col][:] = line[idx:idx + vla_len] idx += vla_len elif dtype[col].shape: # This is an array column array_size = int(np.multiply.reduce(dtype[col].shape)) slice_ = slice(idx, idx + array_size) idx += array_size else: slice_ = None if slice_ is None: # This is a scalar row element data[row][col] = format_value(col, line[idx]) idx += 1 else: data[row][col].flat[:] = [format_value(col, val) for val in line[slice_]] col += 1 if close_file: fileobj.close() return data @classmethod def _load_coldefs(cls, fileobj): """ Read the table column definitions from the ASCII file output by BinTableHDU.dump(). """ close_file = False if isinstance(fileobj, str): fileobj = open(fileobj, 'r') close_file = True columns = [] for line in fileobj: words = line[:-1].split() kwargs = {} for key in ['name', 'format', 'disp', 'unit', 'dim']: kwargs[key] = words.pop(0).replace('""', '') for key in ['null', 'bscale', 'bzero']: word = words.pop(0).replace('""', '') if word: word = _str_to_num(word) kwargs[key] = word columns.append(Column(**kwargs)) if close_file: fileobj.close() return ColDefs(columns) @contextlib.contextmanager def _binary_table_byte_swap(data): """ Ensures that all the data of a binary FITS table (represented as a FITS_rec object) is in a big-endian byte order. Columns are swapped in-place one at a time, and then returned to their previous byte order when this context manager exits. Because a new dtype is needed to represent the byte-swapped columns, the new dtype is temporarily applied as well. 
""" orig_dtype = data.dtype names = [] formats = [] offsets = [] to_swap = [] if sys.byteorder == 'little': swap_types = ('<', '=') else: swap_types = ('<',) for idx, name in enumerate(orig_dtype.names): field = _get_recarray_field(data, idx) field_dtype, field_offset = orig_dtype.fields[name] names.append(name) formats.append(field_dtype) offsets.append(field_offset) if isinstance(field, chararray.chararray): continue # only swap unswapped # must use field_dtype.base here since for multi-element dtypes, # the .str with be '|V<N>' where <N> is the total bytes per element if field.itemsize > 1 and field_dtype.base.str[0] in swap_types: to_swap.append(field) # Override the dtype for this field in the new record dtype with # the byteswapped version formats[-1] = field_dtype.newbyteorder() # deal with var length table recformat = data.columns._recformats[idx] if isinstance(recformat, _FormatP): coldata = data.field(idx) for c in coldata: if (not isinstance(c, chararray.chararray) and c.itemsize > 1 and c.dtype.str[0] in swap_types): to_swap.append(c) for arr in reversed(to_swap): arr.byteswap(True) new_dtype = nh.realign_dtype(np.dtype(list(zip(names, formats))), offsets) data.dtype = new_dtype yield data for arr in to_swap: arr.byteswap(True) data.dtype = orig_dtype
a28b000f60b0aac57dd77d18dbc67f9826f855625d0c3c3fbf9672b00440bf89
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import gzip
import io

from ..file import _File
from .base import NonstandardExtHDU
from .hdulist import HDUList
from ..header import Header, _pad_length
from ..util import fileobj_name

from ....utils import lazyproperty


class FitsHDU(NonstandardExtHDU):
    """
    A non-standard extension HDU for encapsulating entire FITS files within a
    single HDU of a container FITS file.  These HDUs have an extension (that
    is an XTENSION keyword) of FITS.

    The FITS file contained in the HDU's data can be accessed by the `hdulist`
    attribute which returns the contained FITS file as an `HDUList` object.
    """

    _extension = 'FITS'

    @lazyproperty
    def hdulist(self):
        self._file.seek(self._data_offset)
        fileobj = io.BytesIO()
        # Read the data into a BytesIO--reading directly from the file
        # won't work (at least for gzipped files) due to problems deep
        # within the gzip module that make it difficult to read gzip files
        # embedded in another file
        fileobj.write(self._file.read(self.size))
        fileobj.seek(0)
        if self._header['COMPRESS']:
            fileobj = gzip.GzipFile(fileobj=fileobj)
        return HDUList.fromfile(fileobj, mode='readonly')

    @classmethod
    def fromfile(cls, filename, compress=False):
        """
        Like `FitsHDU.fromhdulist()`, but creates a FitsHDU from a file on
        disk.

        Parameters
        ----------
        filename : str
            The path to the file to read into a FitsHDU
        compress : bool, optional
            Gzip compress the FITS file
        """

        return cls.fromhdulist(HDUList.fromfile(filename), compress=compress)

    @classmethod
    def fromhdulist(cls, hdulist, compress=False):
        """
        Creates a new FitsHDU from a given HDUList object.

        Parameters
        ----------
        hdulist : HDUList
            A valid HDUList object.
        compress : bool, optional
            Gzip compress the FITS file
        """

        fileobj = bs = io.BytesIO()
        if compress:
            if hasattr(hdulist, '_file'):
                name = fileobj_name(hdulist._file)
            else:
                name = None
            fileobj = gzip.GzipFile(name, mode='wb', fileobj=bs)

        hdulist.writeto(fileobj)

        if compress:
            fileobj.close()

        # A proper HDUList should still be padded out to a multiple of 2880
        # technically speaking
        padding = (_pad_length(bs.tell()) * cls._padding_byte).encode('ascii')
        bs.write(padding)

        bs.seek(0)

        cards = [
            ('XTENSION', cls._extension, 'FITS extension'),
            ('BITPIX', 8, 'array data type'),
            ('NAXIS', 1, 'number of array dimensions'),
            ('NAXIS1', len(bs.getvalue()), 'Axis length'),
            ('PCOUNT', 0, 'number of parameters'),
            ('GCOUNT', 1, 'number of groups'),
        ]

        # Add the XINDn keywords proposed by Perry, though nothing is done with
        # these at the moment
        if len(hdulist) > 1:
            for idx, hdu in enumerate(hdulist[1:]):
                cards.append(('XIND' + str(idx + 1), hdu._header_offset,
                              'byte offset of extension {}'.format(idx + 1)))

        cards.append(('COMPRESS', compress, 'Uses gzip compression'))
        header = Header(cards)
        return cls._readfrom_internal(_File(bs), header=header)

    @classmethod
    def match_header(cls, header):
        card = header.cards[0]
        if card.keyword != 'XTENSION':
            return False
        xtension = card.value
        if isinstance(xtension, str):
            xtension = xtension.rstrip()
        return xtension == cls._extension

    # TODO: Add header verification

    def _summary(self):
        # TODO: Perhaps make this more descriptive...
        return (self.name, self.ver, self.__class__.__name__,
                len(self._header))
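
# A minimal sketch of wrapping an entire FITS file in a FitsHDU and reading
# it back, as described in the class docstring above.  The helper name is
# hypothetical, and it is assumed that ``FitsHDU`` is re-exported at the
# ``astropy.io.fits`` package level; if not, import it from this module
# instead.
def _example_wrap_hdulist():  # pragma: no cover - illustrative sketch
    import numpy as np
    from astropy.io import fits

    # Serialize a small in-memory FITS file into a single FITS-extension HDU.
    inner = fits.HDUList([fits.PrimaryHDU(data=np.arange(10))])
    wrapper = fits.FitsHDU.fromhdulist(inner, compress=True)

    # The embedded file is recovered lazily through the ``hdulist`` attribute.
    recovered = wrapper.hdulist
    return recovered[0].data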
cb07c3eb8a3056cf9b1622fb101576f562f36829ca765db384cdee2358a8c400
# Licensed under a 3-clause BSD style license - see PYFITS.rst import sys import warnings import numpy as np from .base import DELAYED, _ValidHDU, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX from ..header import Header from ..util import _is_pseudo_unsigned, _unsigned_zero, _is_int from ..verify import VerifyWarning from ....utils import isiterable, lazyproperty class _ImageBaseHDU(_ValidHDU): """FITS image HDU base class. Attributes ---------- header image header data image data """ standard_keyword_comments = { 'SIMPLE': 'conforms to FITS standard', 'XTENSION': 'Image extension', 'BITPIX': 'array data type', 'NAXIS': 'number of array dimensions', 'GROUPS': 'has groups', 'PCOUNT': 'number of parameters', 'GCOUNT': 'number of groups' } def __init__(self, data=None, header=None, do_not_scale_image_data=False, uint=True, scale_back=False, ignore_blank=False, **kwargs): from .groups import GroupsHDU super().__init__(data=data, header=header) if header is not None: if not isinstance(header, Header): # TODO: Instead maybe try initializing a new Header object from # whatever is passed in as the header--there are various types # of objects that could work for this... raise ValueError('header must be a Header object') if data is DELAYED: # Presumably if data is DELAYED then this HDU is coming from an # open file, and was not created in memory if header is None: # this should never happen raise ValueError('No header to setup HDU.') # if the file is read the first time, no need to copy, and keep it # unchanged else: self._header = header else: # TODO: Some of this card manipulation should go into the # PrimaryHDU and GroupsHDU subclasses # construct a list of cards of minimal header if isinstance(self, ExtensionHDU): c0 = ('XTENSION', 'IMAGE', self.standard_keyword_comments['XTENSION']) else: c0 = ('SIMPLE', True, self.standard_keyword_comments['SIMPLE']) cards = [ c0, ('BITPIX', 8, self.standard_keyword_comments['BITPIX']), ('NAXIS', 0, self.standard_keyword_comments['NAXIS'])] if isinstance(self, GroupsHDU): cards.append(('GROUPS', True, self.standard_keyword_comments['GROUPS'])) if isinstance(self, (ExtensionHDU, GroupsHDU)): cards.append(('PCOUNT', 0, self.standard_keyword_comments['PCOUNT'])) cards.append(('GCOUNT', 1, self.standard_keyword_comments['GCOUNT'])) if header is not None: orig = header.copy() header = Header(cards) header.extend(orig, strip=True, update=True, end=True) else: header = Header(cards) self._header = header self._do_not_scale_image_data = do_not_scale_image_data self._uint = uint self._scale_back = scale_back # Keep track of whether BZERO/BSCALE were set from the header so that # values for self._orig_bzero and self._orig_bscale can be set # properly, if necessary, once the data has been set. bzero_in_header = 'BZERO' in self._header bscale_in_header = 'BSCALE' in self._header self._bzero = self._header.get('BZERO', 0) self._bscale = self._header.get('BSCALE', 1) # Save off other important values from the header needed to interpret # the image data self._axes = [self._header.get('NAXIS' + str(axis + 1), 0) for axis in range(self._header.get('NAXIS', 0))] # Not supplying a default for BITPIX makes sense because BITPIX # is either in the header or should be determined from the dtype of # the data (which occurs when the data is set). 
self._bitpix = self._header.get('BITPIX') self._gcount = self._header.get('GCOUNT', 1) self._pcount = self._header.get('PCOUNT', 0) self._blank = None if ignore_blank else self._header.get('BLANK') self._verify_blank() self._orig_bitpix = self._bitpix self._orig_blank = self._header.get('BLANK') # These get set again below, but need to be set to sensible defaults # here. self._orig_bzero = self._bzero self._orig_bscale = self._bscale # Set the name attribute if it was provided (if this is an ImageHDU # this will result in setting the EXTNAME keyword of the header as # well) if 'name' in kwargs and kwargs['name']: self.name = kwargs['name'] if 'ver' in kwargs and kwargs['ver']: self.ver = kwargs['ver'] # Set to True if the data or header is replaced, indicating that # update_header should be called self._modified = False if data is DELAYED: if (not do_not_scale_image_data and (self._bscale != 1 or self._bzero != 0)): # This indicates that when the data is accessed or written out # to a new file it will need to be rescaled self._data_needs_rescale = True return else: # Setting data will set set _bitpix, _bzero, and _bscale to the # appropriate BITPIX for the data, and always sets _bzero=0 and # _bscale=1. self.data = data self.update_header() # Check again for BITPIX/BSCALE/BZERO in case they changed when the # data was assigned. This can happen, for example, if the input # data is an unsigned int numpy array. self._bitpix = self._header.get('BITPIX') # Do not provide default values for BZERO and BSCALE here because # the keywords will have been deleted in the header if appropriate # after scaling. We do not want to put them back in if they # should not be there. self._bzero = self._header.get('BZERO') self._bscale = self._header.get('BSCALE') # Handle case where there was no BZERO/BSCALE in the initial header # but there should be a BSCALE/BZERO now that the data has been set. if not bzero_in_header: self._orig_bzero = self._bzero if not bscale_in_header: self._orig_bscale = self._bscale @classmethod def match_header(cls, header): """ _ImageBaseHDU is sort of an abstract class for HDUs containing image data (as opposed to table data) and should never be used directly. """ raise NotImplementedError @property def is_image(self): return True @property def section(self): """ Access a section of the image array without loading the entire array into memory. The :class:`Section` object returned by this attribute is not meant to be used directly by itself. Rather, slices of the section return the appropriate slice of the data, and loads *only* that section into memory. Sections are mostly obsoleted by memmap support, but should still be used to deal with very large scaled images. See the :ref:`data-sections` section of the Astropy documentation for more details. """ return Section(self) @property def shape(self): """ Shape of the image array--should be equivalent to ``self.data.shape``. """ # Determine from the values read from the header return tuple(reversed(self._axes)) @property def header(self): return self._header @header.setter def header(self, header): self._header = header self._modified = True self.update_header() @lazyproperty def data(self): """ Image/array data as a `~numpy.ndarray`. Please remember that the order of axes on an Numpy array are opposite of the order specified in the FITS file. For example for a 2D image the "rows" or y-axis are the first dimension, and the "columns" or x-axis are the second dimension. 
If the data is scaled using the BZERO and BSCALE parameters, this attribute returns the data scaled to its physical values unless the file was opened with ``do_not_scale_image_data=True``. """ if len(self._axes) < 1: return data = self._get_scaled_image_data(self._data_offset, self.shape) self._update_header_scale_info(data.dtype) return data @data.setter def data(self, data): if 'data' in self.__dict__ and self.__dict__['data'] is not None: if self.__dict__['data'] is data: return else: self._data_replaced = True was_unsigned = _is_pseudo_unsigned(self.__dict__['data'].dtype) else: self._data_replaced = True was_unsigned = False if data is not None and not isinstance(data, np.ndarray): # Try to coerce the data into a numpy array--this will work, on # some level, for most objects try: data = np.array(data) except Exception: raise TypeError('data object {!r} could not be coerced into an ' 'ndarray'.format(data)) self.__dict__['data'] = data self._modified = True if isinstance(data, np.ndarray): # Set new values of bitpix, bzero, and bscale now, but wait to # revise original values until header is updated. self._bitpix = DTYPE2BITPIX[data.dtype.name] self._bscale = 1 self._bzero = 0 self._blank = None self._axes = list(data.shape) self._axes.reverse() elif self.data is None: self._axes = [] else: raise ValueError('not a valid data array') # Update the header, including adding BZERO/BSCALE if new data is # unsigned. Does not change the values of self._bitpix, # self._orig_bitpix, etc. self.update_header() if (data is not None and was_unsigned): self._update_header_scale_info(data.dtype) # Keep _orig_bitpix as it was until header update is done, then # set it, to allow easier handling of the case of unsigned # integer data being converted to something else. Setting these here # is needed only for the case do_not_scale_image_data=True when # setting the data to unsigned int. # If necessary during initialization, i.e. if BSCALE and BZERO were # not in the header but the data was unsigned, the attributes below # will be update in __init__. self._orig_bitpix = self._bitpix self._orig_bscale = self._bscale self._orig_bzero = self._bzero # returning the data signals to lazyproperty that we've already handled # setting self.__dict__['data'] return data def update_header(self): """ Update the header keywords to agree with the data. 
""" if not (self._modified or self._header._modified or (self._has_data and self.shape != self.data.shape)): # Not likely that anything needs updating return old_naxis = self._header.get('NAXIS', 0) if 'BITPIX' not in self._header: bitpix_comment = self.standard_keyword_comments['BITPIX'] else: bitpix_comment = self._header.comments['BITPIX'] # Update the BITPIX keyword and ensure it's in the correct # location in the header self._header.set('BITPIX', self._bitpix, bitpix_comment, after=0) # If the data's shape has changed (this may have happened without our # noticing either via a direct update to the data.shape attribute) we # need to update the internal self._axes if self._has_data and self.shape != self.data.shape: self._axes = list(self.data.shape) self._axes.reverse() # Update the NAXIS keyword and ensure it's in the correct location in # the header if 'NAXIS' in self._header: naxis_comment = self._header.comments['NAXIS'] else: naxis_comment = self.standard_keyword_comments['NAXIS'] self._header.set('NAXIS', len(self._axes), naxis_comment, after='BITPIX') # TODO: This routine is repeated in several different classes--it # should probably be made available as a method on all standard HDU # types # add NAXISi if it does not exist for idx, axis in enumerate(self._axes): naxisn = 'NAXIS' + str(idx + 1) if naxisn in self._header: self._header[naxisn] = axis else: if (idx == 0): after = 'NAXIS' else: after = 'NAXIS' + str(idx) self._header.set(naxisn, axis, after=after) # delete extra NAXISi's for idx in range(len(self._axes) + 1, old_naxis + 1): try: del self._header['NAXIS' + str(idx)] except KeyError: pass if 'BLANK' in self._header: self._blank = self._header['BLANK'] # Add BSCALE/BZERO to header if data is unsigned int. self._update_uint_scale_keywords() self._modified = False def _update_header_scale_info(self, dtype=None): """ Delete BSCALE/BZERO from header if necessary. """ # Note that _dtype_for_bitpix determines the dtype based on the # "original" values of bitpix, bscale, and bzero, stored in # self._orig_bitpix, etc. It contains the logic for determining which # special cases of BZERO/BSCALE, if any, are auto-detected as following # the FITS unsigned int convention. # Added original_was_unsigned with the intent of facilitating the # special case of do_not_scale_image_data=True and uint=True # eventually. if self._dtype_for_bitpix() is not None: original_was_unsigned = self._dtype_for_bitpix().kind == 'u' else: original_was_unsigned = False if (self._do_not_scale_image_data or (self._orig_bzero == 0 and self._orig_bscale == 1)): return if dtype is None: dtype = self._dtype_for_bitpix() if (dtype is not None and dtype.kind == 'u' and (self._scale_back or self._scale_back is None)): # Data is pseudo-unsigned integers, and the scale_back option # was not explicitly set to False, so preserve all the scale # factors return for keyword in ['BSCALE', 'BZERO']: try: del self._header[keyword] # Since _update_header_scale_info can, currently, be called # *after* _prewriteto(), replace these with blank cards so # the header size doesn't change self._header.append() except KeyError: pass if dtype is None: dtype = self._dtype_for_bitpix() if dtype is not None: self._header['BITPIX'] = DTYPE2BITPIX[dtype.name] self._bzero = 0 self._bscale = 1 self._bitpix = self._header['BITPIX'] self._blank = self._header.pop('BLANK', None) def scale(self, type=None, option='old', bscale=None, bzero=None): """ Scale image data by using ``BSCALE``/``BZERO``. 
Call to this method will scale `data` and update the keywords of ``BSCALE`` and ``BZERO`` in the HDU's header. This method should only be used right before writing to the output file, as the data will be scaled and is therefore not very usable after the call. Parameters ---------- type : str, optional destination data type, use a string representing a numpy dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If is `None`, use the current data type. option : str, optional How to scale the data: ``"old"`` uses the original ``BSCALE`` and ``BZERO`` values from when the data was read/created (defaulting to 1 and 0 if they don't exist). For integer data only, ``"minmax"`` uses the minimum and maximum of the data to scale. User-specified ``bscale``/``bzero`` values always take precedence. bscale, bzero : int, optional User-specified ``BSCALE`` and ``BZERO`` values """ # Disable blank support for now self._scale_internal(type=type, option=option, bscale=bscale, bzero=bzero, blank=None) def _scale_internal(self, type=None, option='old', bscale=None, bzero=None, blank=0): """ This is an internal implementation of the `scale` method, which also supports handling BLANK properly. TODO: This is only needed for fixing #3865 without introducing any public API changes. We should support BLANK better when rescaling data, and when that is added the need for this internal interface should go away. Note: the default of ``blank=0`` merely reflects the current behavior, and is not necessarily a deliberate choice (better would be to disallow conversion of floats to ints without specifying a BLANK if there are NaN/inf values). """ if self.data is None: return # Determine the destination (numpy) data type if type is None: type = BITPIX2DTYPE[self._bitpix] _type = getattr(np, type) # Determine how to scale the data # bscale and bzero takes priority if bscale is not None and bzero is not None: _scale = bscale _zero = bzero elif bscale is not None: _scale = bscale _zero = 0 elif bzero is not None: _scale = 1 _zero = bzero elif (option == 'old' and self._orig_bscale is not None and self._orig_bzero is not None): _scale = self._orig_bscale _zero = self._orig_bzero elif option == 'minmax' and not issubclass(_type, np.floating): min = np.minimum.reduce(self.data.flat) max = np.maximum.reduce(self.data.flat) if _type == np.uint8: # uint8 case _zero = min _scale = (max - min) / (2.0 ** 8 - 1) else: _zero = (max + min) / 2.0 # throw away -2^N nbytes = 8 * _type().itemsize _scale = (max - min) / (2.0 ** nbytes - 2) else: _scale = 1 _zero = 0 # Do the scaling if _zero != 0: # 0.9.6.3 to avoid out of range error for BZERO = +32768 # We have to explcitly cast _zero to prevent numpy from raising an # error when doing self.data -= zero, and we do this instead of # self.data = self.data - zero to avoid doubling memory usage. np.add(self.data, -_zero, out=self.data, casting='unsafe') self._header['BZERO'] = _zero else: try: del self._header['BZERO'] except KeyError: pass if _scale and _scale != 1: self.data = self.data / _scale self._header['BSCALE'] = _scale else: try: del self._header['BSCALE'] except KeyError: pass # Set blanks if blank is not None and issubclass(_type, np.integer): # TODO: Perhaps check that the requested BLANK value fits in the # integer type being scaled to? 
self.data[np.isnan(self.data)] = blank self._header['BLANK'] = blank if self.data.dtype.type != _type: self.data = np.array(np.around(self.data), dtype=_type) # Update the BITPIX Card to match the data self._bitpix = DTYPE2BITPIX[self.data.dtype.name] self._bzero = self._header.get('BZERO', 0) self._bscale = self._header.get('BSCALE', 1) self._blank = blank self._header['BITPIX'] = self._bitpix # Since the image has been manually scaled, the current # bitpix/bzero/bscale now serve as the 'original' scaling of the image, # as though the original image has been completely replaced self._orig_bitpix = self._bitpix self._orig_bzero = self._bzero self._orig_bscale = self._bscale self._orig_blank = self._blank def _verify(self, option='warn'): # update_header can fix some things that would otherwise cause # verification to fail, so do that now... self.update_header() self._verify_blank() return super()._verify(option) def _verify_blank(self): # Probably not the best place for this (it should probably happen # in _verify as well) but I want to be able to raise this warning # both when the HDU is created and when written if self._blank is None: return messages = [] # TODO: Once the FITSSchema framewhere is merged these warnings # should be handled by the schema if not _is_int(self._blank): messages.append( "Invalid value for 'BLANK' keyword in header: {0!r} " "The 'BLANK' keyword must be an integer. It will be " "ignored in the meantime.".format(self._blank)) self._blank = None if not self._bitpix > 0: messages.append( "Invalid 'BLANK' keyword in header. The 'BLANK' keyword " "is only applicable to integer data, and will be ignored " "in this HDU.") self._blank = None for msg in messages: warnings.warn(msg, VerifyWarning) def _prewriteto(self, checksum=False, inplace=False): if self._scale_back: self._scale_internal(BITPIX2DTYPE[self._orig_bitpix], blank=self._orig_blank) self.update_header() if not inplace and self._data_needs_rescale: # Go ahead and load the scaled image data and update the header # with the correct post-rescaling headers _ = self.data return super()._prewriteto(checksum, inplace) def _writedata_internal(self, fileobj): size = 0 if self.data is not None: # Based on the system type, determine the byteorders that # would need to be swapped to get to big-endian output if sys.byteorder == 'little': swap_types = ('<', '=') else: swap_types = ('<',) # deal with unsigned integer 16, 32 and 64 data if _is_pseudo_unsigned(self.data.dtype): # Convert the unsigned array to signed output = np.array( self.data - _unsigned_zero(self.data.dtype), dtype='>i{}'.format(self.data.dtype.itemsize)) should_swap = False else: output = self.data byteorder = output.dtype.str[0] should_swap = (byteorder in swap_types) if not fileobj.simulateonly: if should_swap: if output.flags.writeable: output.byteswap(True) try: fileobj.writearray(output) finally: output.byteswap(True) else: # For read-only arrays, there is no way around making # a byteswapped copy of the data. fileobj.writearray(output.byteswap(False)) else: fileobj.writearray(output) size += output.size * output.itemsize return size def _dtype_for_bitpix(self): """ Determine the dtype that the data should be converted to depending on the BITPIX value in the header, and possibly on the BSCALE value as well. Returns None if there should not be any change. 
""" bitpix = self._orig_bitpix # Handle possible conversion to uints if enabled if self._uint and self._orig_bscale == 1: for bits, dtype in ((16, np.dtype('uint16')), (32, np.dtype('uint32')), (64, np.dtype('uint64'))): if bitpix == bits and self._orig_bzero == 1 << (bits - 1): return dtype if bitpix > 16: # scale integers to Float64 return np.dtype('float64') elif bitpix > 0: # scale integers to Float32 return np.dtype('float32') def _convert_pseudo_unsigned(self, data): """ Handle "pseudo-unsigned" integers, if the user requested it. Returns the converted data array if so; otherwise returns None. In this case case, we don't need to handle BLANK to convert it to NAN, since we can't do NaNs with integers, anyway, i.e. the user is responsible for managing blanks. """ dtype = self._dtype_for_bitpix() # bool(dtype) is always False--have to explicitly compare to None; this # caused a fair amount of hair loss if dtype is not None and dtype.kind == 'u': # Convert the input raw data into an unsigned integer array and # then scale the data adjusting for the value of BZERO. Note that # we subtract the value of BZERO instead of adding because of the # way numpy converts the raw signed array into an unsigned array. bits = dtype.itemsize * 8 data = np.array(data, dtype=dtype) data -= np.uint64(1 << (bits - 1)) return data def _get_scaled_image_data(self, offset, shape): """ Internal function for reading image data from a file and apply scale factors to it. Normally this is used for the entire image, but it supports alternate offset/shape for Section support. """ code = BITPIX2DTYPE[self._orig_bitpix] raw_data = self._get_raw_data(shape, code, offset) raw_data.dtype = raw_data.dtype.newbyteorder('>') if self._do_not_scale_image_data or ( self._orig_bzero == 0 and self._orig_bscale == 1 and self._blank is None): # No further conversion of the data is necessary return raw_data try: if self._file.strict_memmap: raise ValueError("Cannot load a memory-mapped image: " "BZERO/BSCALE/BLANK header keywords present. " "Set memmap=False.") except AttributeError: # strict_memmap not set pass data = None if not (self._orig_bzero == 0 and self._orig_bscale == 1): data = self._convert_pseudo_unsigned(raw_data) if data is None: # In these cases, we end up with floating-point arrays and have to # apply bscale and bzero. We may have to handle BLANK and convert # to NaN in the resulting floating-point arrays. # The BLANK keyword should only be applied for integer data (this # is checked in __init__ but it can't hurt to double check here) blanks = None if self._blank is not None and self._bitpix > 0: blanks = raw_data.flat == self._blank # The size of blanks in bytes is the number of elements in # raw_data.flat. However, if we use np.where instead we will # only use 8 bytes for each index where the condition is true. 
# So if the number of blank items is fewer than # len(raw_data.flat) / 8, using np.where will use less memory if blanks.sum() < len(blanks) / 8: blanks = np.where(blanks) new_dtype = self._dtype_for_bitpix() if new_dtype is not None: data = np.array(raw_data, dtype=new_dtype) else: # floating point cases if self._file is not None and self._file.memmap: data = raw_data.copy() elif not raw_data.flags.writeable: # create a writeable copy if needed data = raw_data.copy() # if not memmap, use the space already in memory else: data = raw_data del raw_data if self._orig_bscale != 1: np.multiply(data, self._orig_bscale, data) if self._orig_bzero != 0: data += self._orig_bzero if self._blank: data.flat[blanks] = np.nan return data def _summary(self): """ Summarize the HDU: name, dimensions, and formats. """ class_name = self.__class__.__name__ # if data is touched, use data info. if self._data_loaded: if self.data is None: format = '' else: format = self.data.dtype.name format = format[format.rfind('.')+1:] else: if self.shape and all(self.shape): # Only show the format if all the dimensions are non-zero # if data is not touched yet, use header info. format = BITPIX2DTYPE[self._bitpix] else: format = '' if (format and not self._do_not_scale_image_data and (self._orig_bscale != 1 or self._orig_bzero != 0)): new_dtype = self._dtype_for_bitpix() if new_dtype is not None: format += ' (rescales to {0})'.format(new_dtype.name) # Display shape in FITS-order shape = tuple(reversed(self.shape)) return (self.name, self.ver, class_name, len(self._header), shape, format, '') def _calculate_datasum(self): """ Calculate the value for the ``DATASUM`` card in the HDU. """ if self._has_data: # We have the data to be used. d = self.data # First handle the special case where the data is unsigned integer # 16, 32 or 64 if _is_pseudo_unsigned(self.data.dtype): d = np.array(self.data - _unsigned_zero(self.data.dtype), dtype='i{}'.format(self.data.dtype.itemsize)) # Check the byte order of the data. If it is little endian we # must swap it before calculating the datasum. if d.dtype.str[0] != '>': if d.flags.writeable: byteswapped = True d = d.byteswap(True) d.dtype = d.dtype.newbyteorder('>') else: # If the data is not writeable, we just make a byteswapped # copy and don't bother changing it back after d = d.byteswap(False) d.dtype = d.dtype.newbyteorder('>') byteswapped = False else: byteswapped = False cs = self._compute_checksum(d.flatten().view(np.uint8)) # If the data was byteswapped in this method then return it to # its original little-endian order. if byteswapped and not _is_pseudo_unsigned(self.data.dtype): d.byteswap(True) d.dtype = d.dtype.newbyteorder('<') return cs else: # This is the case where the data has not been read from the file # yet. We can handle that in a generic manner so we do it in the # base class. The other possibility is that there is no data at # all. This can also be handled in a generic manner. return super()._calculate_datasum() class Section: """ Image section. Slices of this object load the corresponding section of an image array from the underlying FITS file on disk, and applies any BSCALE/BZERO factors. Section slices cannot be assigned to, and modifications to a section are not saved back to the underlying file. See the :ref:`data-sections` section of the Astropy documentation for more details. 
""" def __init__(self, hdu): self.hdu = hdu def __getitem__(self, key): if not isinstance(key, tuple): key = (key,) naxis = len(self.hdu.shape) return_scalar = (all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis) if not any(k is Ellipsis for k in key): # We can always add a ... at the end, after making note of whether # to return a scalar. key += Ellipsis, ellipsis_count = len([k for k in key if k is Ellipsis]) if len(key) - ellipsis_count > naxis or ellipsis_count > 1: raise IndexError('too many indices for array') # Insert extra dimensions as needed. idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis) key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx+1:] return_0dim = (all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis) dims = [] offset = 0 # Find all leading axes for which a single point is used. for idx in range(naxis): axis = self.hdu.shape[idx] indx = _IndexInfo(key[idx], axis) offset = offset * axis + indx.offset if not _is_int(key[idx]): dims.append(indx.npts) break is_contiguous = indx.contiguous for jdx in range(idx + 1, naxis): axis = self.hdu.shape[jdx] indx = _IndexInfo(key[jdx], axis) dims.append(indx.npts) if indx.npts == axis and indx.contiguous: # The offset needs to multiply the length of all remaining axes offset *= axis else: is_contiguous = False if is_contiguous: dims = tuple(dims) or (1,) bitpix = self.hdu._orig_bitpix offset = self.hdu._data_offset + offset * abs(bitpix) // 8 data = self.hdu._get_scaled_image_data(offset, dims) else: data = self._getdata(key) if return_scalar: data = data.item() elif return_0dim: data = data.squeeze() return data def _getdata(self, keys): for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)): if isinstance(key, slice): ks = range(*key.indices(axis)) break elif isiterable(key): # Handle both integer and boolean arrays. ks = np.arange(axis, dtype=int)[key] break # This should always break at some point if _getdata is called. data = [self[keys[:idx] + (k,) + keys[idx + 1:]] for k in ks] if any(isinstance(key, slice) or isiterable(key) for key in keys[idx + 1:]): # data contains multidimensional arrays; combine them. return np.array(data) else: # Only singleton dimensions remain; concatenate in a 1D array. return np.concatenate([np.atleast_1d(array) for array in data]) class PrimaryHDU(_ImageBaseHDU): """ FITS primary HDU class. """ _default_name = 'PRIMARY' def __init__(self, data=None, header=None, do_not_scale_image_data=False, ignore_blank=False, uint=True, scale_back=None): """ Construct a primary HDU. Parameters ---------- data : array or DELAYED, optional The data in the HDU. header : Header instance, optional The header to be used (as a template). If ``header`` is `None`, a minimal header will be provided. do_not_scale_image_data : bool, optional If `True`, image data is not scaled using BSCALE/BZERO values when read. (default: False) ignore_blank : bool, optional If `True`, the BLANK header keyword will be ignored if present. Otherwise, pixels equal to this value will be replaced with NaNs. (default: False) uint : bool, optional Interpret signed integer data where ``BZERO`` is the central value and ``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as ``uint16`` data. 
(default: True) scale_back : bool, optional If `True`, when saving changes to a file that contained scaled image data, restore the data to the original type and reapply the original BSCALE/BZERO values. This could lead to loss of accuracy if scaling back to integer values after performing floating point operations on the data. Pseudo-unsigned integers are automatically rescaled unless scale_back is explicitly set to `False`. (default: None) """ super().__init__( data=data, header=header, do_not_scale_image_data=do_not_scale_image_data, uint=uint, ignore_blank=ignore_blank, scale_back=scale_back) # insert the keywords EXTEND if header is None: dim = self._header['NAXIS'] if dim == 0: dim = '' self._header.set('EXTEND', True, after='NAXIS' + str(dim)) @classmethod def match_header(cls, header): card = header.cards[0] # Due to problems discussed in #5808, we cannot assume the 'GROUPS' # keyword to be True/False, have to check the value return (card.keyword == 'SIMPLE' and ('GROUPS' not in header or header['GROUPS'] != True) and # noqa card.value) def update_header(self): super().update_header() # Update the position of the EXTEND keyword if it already exists if 'EXTEND' in self._header: if len(self._axes): after = 'NAXIS' + str(len(self._axes)) else: after = 'NAXIS' self._header.set('EXTEND', after=after) def _verify(self, option='warn'): errs = super()._verify(option=option) # Verify location and value of mandatory keywords. # The EXTEND keyword is only mandatory if the HDU has extensions; this # condition is checked by the HDUList object. However, if we already # have an EXTEND keyword check that its position is correct if 'EXTEND' in self._header: naxis = self._header.get('NAXIS', 0) self.req_cards('EXTEND', naxis + 3, lambda v: isinstance(v, bool), True, option, errs) return errs class ImageHDU(_ImageBaseHDU, ExtensionHDU): """ FITS image extension HDU class. """ _extension = 'IMAGE' def __init__(self, data=None, header=None, name=None, do_not_scale_image_data=False, uint=True, scale_back=None, ver=None): """ Construct an image HDU. Parameters ---------- data : array The data in the HDU. header : Header instance The header to be used (as a template). If ``header`` is `None`, a minimal header will be provided. name : str, optional The name of the HDU, will be the value of the keyword ``EXTNAME``. do_not_scale_image_data : bool, optional If `True`, image data is not scaled using BSCALE/BZERO values when read. (default: False) uint : bool, optional Interpret signed integer data where ``BZERO`` is the central value and ``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as ``uint16`` data. (default: True) scale_back : bool, optional If `True`, when saving changes to a file that contained scaled image data, restore the data to the original type and reapply the original BSCALE/BZERO values. This could lead to loss of accuracy if scaling back to integer values after performing floating point operations on the data. Pseudo-unsigned integers are automatically rescaled unless scale_back is explicitly set to `False`. (default: None) ver : int > 0 or None, optional The ver of the HDU, will be the value of the keyword ``EXTVER``. If not given or None, it defaults to the value of the ``EXTVER`` card of the ``header`` or 1. (default: None) """ # This __init__ currently does nothing differently from the base class, # and is only explicitly defined for the docstring. 
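        # A hypothetical construction sketch (the array contents, EXTNAME, and
        # EXTVER values below are placeholders chosen for illustration, not
        # defaults of this class):
        #
        #     >>> import numpy as np
        #     >>> from astropy.io import fits
        #     >>> hdu = fits.ImageHDU(data=np.zeros((10, 10), dtype='float32'),
        #     ...                     name='SCI', ver=2)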
super().__init__( data=data, header=header, name=name, do_not_scale_image_data=do_not_scale_image_data, uint=uint, scale_back=scale_back, ver=ver) @classmethod def match_header(cls, header): card = header.cards[0] xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() return card.keyword == 'XTENSION' and xtension == cls._extension def _verify(self, option='warn'): """ ImageHDU verify method. """ errs = super()._verify(option=option) naxis = self._header.get('NAXIS', 0) # PCOUNT must == 0, GCOUNT must == 1; the former is verified in # ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT # to be >= 0, so we need to check it here self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v == 0), 0, option, errs) return errs class _IndexInfo: def __init__(self, indx, naxis): if _is_int(indx): if 0 <= indx < naxis: self.npts = 1 self.offset = indx self.contiguous = True else: raise IndexError('Index {} out of range.'.format(indx)) elif isinstance(indx, slice): start, stop, step = indx.indices(naxis) self.npts = (stop - start) // step self.offset = start self.contiguous = step == 1 elif isiterable(indx): self.npts = len(indx) self.offset = 0 self.contiguous = False else: raise IndexError('Illegal index {}'.format(indx))
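
# Rough illustration of how ``_IndexInfo`` summarizes a single index along one
# axis (the axis length of 10 is an arbitrary value chosen for this sketch;
# the results shown follow directly from the constructor above):
#
#     >>> info = _IndexInfo(3, 10)                # integer index
#     >>> (info.npts, info.offset, info.contiguous)
#     (1, 3, True)
#     >>> info = _IndexInfo(slice(2, 8, 2), 10)   # strided slice
#     >>> (info.npts, info.offset, info.contiguous)
#     (3, 2, False)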
44c4a18bf034475139abb7440260bdc63ad57a10682336189740c0da6696d8c6
# Licensed under a 3-clause BSD style license - see PYFITS.rst import ctypes import gc import itertools import math import re import time import warnings from contextlib import suppress import numpy as np from .base import DELAYED, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX from .image import ImageHDU from .table import BinTableHDU from ..card import Card from ..column import Column, ColDefs, TDEF_RE from ..column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES from ..fitsrec import FITS_rec from ..header import Header from ..util import (_is_pseudo_unsigned, _unsigned_zero, _is_int, _get_array_mmap) from ....utils import lazyproperty from ....utils.exceptions import (AstropyPendingDeprecationWarning, AstropyUserWarning) try: from .. import compression COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = True except ImportError: COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = False # Quantization dithering method constants; these are right out of fitsio.h NO_DITHER = -1 SUBTRACTIVE_DITHER_1 = 1 SUBTRACTIVE_DITHER_2 = 2 QUANTIZE_METHOD_NAMES = { NO_DITHER: 'NO_DITHER', SUBTRACTIVE_DITHER_1: 'SUBTRACTIVE_DITHER_1', SUBTRACTIVE_DITHER_2: 'SUBTRACTIVE_DITHER_2' } DITHER_SEED_CLOCK = 0 DITHER_SEED_CHECKSUM = -1 COMPRESSION_TYPES = ('RICE_1', 'GZIP_1', 'GZIP_2', 'PLIO_1', 'HCOMPRESS_1') # Default compression parameter values DEFAULT_COMPRESSION_TYPE = 'RICE_1' DEFAULT_QUANTIZE_LEVEL = 16. DEFAULT_QUANTIZE_METHOD = NO_DITHER DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK DEFAULT_HCOMP_SCALE = 0 DEFAULT_HCOMP_SMOOTH = 0 DEFAULT_BLOCK_SIZE = 32 DEFAULT_BYTE_PIX = 4 CMTYPE_ALIASES = {} # CFITSIO version-specific features if COMPRESSION_SUPPORTED: try: CFITSIO_SUPPORTS_GZIPDATA = compression.CFITSIO_VERSION >= 3.28 CFITSIO_SUPPORTS_Q_FORMAT = compression.CFITSIO_VERSION >= 3.35 if compression.CFITSIO_VERSION >= 3.35: CMTYPE_ALIASES['RICE_ONE'] = 'RICE_1' except AttributeError: # This generally shouldn't happen unless running setup.py in an # environment where an old build of pyfits exists CFITSIO_SUPPORTS_GZIPDATA = True CFITSIO_SUPPORTS_Q_FORMAT = True COMPRESSION_KEYWORDS = {'ZIMAGE', 'ZCMPTYPE', 'ZBITPIX', 'ZNAXIS', 'ZMASKCMP', 'ZSIMPLE', 'ZTENSION', 'ZEXTEND'} class CompImageHeader(Header): """ Header object for compressed image HDUs designed to keep the compression header and the underlying image header properly synchronized. This essentially wraps the image header, so that all values are read from and written to the image header. However, updates to the image header will also update the table header where appropriate. """ # TODO: The difficulty of implementing this screams a need to rewrite this # module _keyword_remaps = { 'SIMPLE': 'ZSIMPLE', 'XTENSION': 'ZTENSION', 'BITPIX': 'ZBITPIX', 'NAXIS': 'ZNAXIS', 'EXTEND': 'ZEXTEND', 'BLOCKED': 'ZBLOCKED', 'PCOUNT': 'ZPCOUNT', 'GCOUNT': 'ZGCOUNT', 'CHECKSUM': 'ZHECKSUM', 'DATASUM': 'ZDATASUM' } _zdef_re = re.compile(r'(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?') _compression_keywords = set(_keyword_remaps.values()).union( ['ZIMAGE', 'ZCMPTYPE', 'ZMASKCMP', 'ZQUANTIZ', 'ZDITHER0']) _indexed_compression_keywords = {'ZNAXIS', 'ZTILE', 'ZNAME', 'ZVAL'} # TODO: Once it place it should be possible to manage some of this through # the schema system, but it's not quite ready for that yet. 
Also it still
    # makes more sense to change CompImageHDU to subclass ImageHDU :/
    def __init__(self, table_header, image_header=None):
        if image_header is None:
            image_header = Header()

        self._cards = image_header._cards
        self._keyword_indices = image_header._keyword_indices
        self._rvkc_indices = image_header._rvkc_indices
        self._modified = image_header._modified

        self._table_header = table_header

    # We need to override any Header methods that can modify the header, and
    # ensure that they sync with the underlying _table_header

    def __setitem__(self, key, value):
        # This isn't pretty, but if the `key` is either an int or a tuple we
        # need to figure out what keyword name that maps to before doing
        # anything else; these checks will be repeated later in the
        # super().__setitem__ call but I don't see another way around it
        # without some major refactoring
        if self._set_slice(key, value, self):
            return

        if isinstance(key, int):
            keyword, index = self._keyword_from_index(key)
        elif isinstance(key, tuple):
            keyword, index = key
        else:
            # We don't want to specify an index otherwise, because that will
            # break the behavior for new keywords and for commentary keywords
            keyword, index = key, None

        if self._is_reserved_keyword(keyword):
            return

        super().__setitem__(key, value)

        if index is not None:
            remapped_keyword = self._remap_keyword(keyword)
            self._table_header[remapped_keyword, index] = value
        # Else this will pass through to ._update

    def __delitem__(self, key):
        if isinstance(key, slice) or self._haswildcard(key):
            # If given a slice pass that on to the superclass and bail out
            # early; we only want to make updates to _table_header when given
            # a key specifying a single keyword
            return super().__delitem__(key)

        if isinstance(key, int):
            keyword, index = self._keyword_from_index(key)
        elif isinstance(key, tuple):
            keyword, index = key
        else:
            keyword, index = key, None

        if key not in self:
            raise KeyError("Keyword {!r} not found.".format(key))

        super().__delitem__(key)

        remapped_keyword = self._remap_keyword(keyword)

        if remapped_keyword in self._table_header:
            if index is not None:
                del self._table_header[(remapped_keyword, index)]
            else:
                del self._table_header[remapped_keyword]

    def append(self, card=None, useblanks=True, bottom=False, end=False):
        # This logic unfortunately needs to be duplicated from the base class
        # in order to determine the keyword
        if isinstance(card, str):
            card = Card(card)
        elif isinstance(card, tuple):
            card = Card(*card)
        elif card is None:
            card = Card()
        elif not isinstance(card, Card):
            raise ValueError(
                'The value appended to a Header must be either a keyword or '
                '(keyword, value, [comment]) tuple; got: {!r}'.format(card))

        if self._is_reserved_keyword(card.keyword):
            return

        super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)

        remapped_keyword = self._remap_keyword(card.keyword)
        card = Card(remapped_keyword, card.value, card.comment)

        # Here we disable the use of blank cards, because the call above to
        # Header.append may have already deleted a blank card in the table
        # header, thanks to inheritance: Header.append calls 'del self[-1]'
        # to delete a blank card, which calls CompImageHeader.__delitem__,
        # which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False, bottom=bottom, end=end) def insert(self, key, card, useblanks=True, after=False): if isinstance(key, int): # Determine condition to pass through to append if after: if key == -1: key = len(self._cards) else: key += 1 if key >= len(self._cards): self.append(card, end=True) return if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif not isinstance(card, Card): raise ValueError( 'The value inserted into a Header must be either a keyword or ' '(keyword, value, [comment]) tuple; got: {!r}'.format(card)) if self._is_reserved_keyword(card.keyword): return # Now the tricky part is to determine where to insert in the table # header. If given a numerical index we need to map that to the # corresponding index in the table header. Although rare, there may be # cases where there is no mapping in which case we just try the same # index # NOTE: It is crucial that remapped_index in particular is figured out # before the image header is modified remapped_index = self._remap_index(key) remapped_keyword = self._remap_keyword(card.keyword) super().insert(key, card, useblanks=useblanks, after=after) card = Card(remapped_keyword, card.value, card.comment) # Here we disable the use of blank cards, because the call above to # Header.insert may have already deleted a blank card in the table # header, thanks to inheritance: Header.insert calls 'del self[-1]' # to delete a blank card, which calls CompImageHeader.__delitem__, # which deletes the blank card both in the image and the table headers! self._table_header.insert(remapped_index, card, useblanks=False, after=after) def _update(self, card): keyword = card[0] if self._is_reserved_keyword(keyword): return super()._update(card) if keyword in Card._commentary_keywords: # Otherwise this will result in a duplicate insertion return remapped_keyword = self._remap_keyword(keyword) self._table_header._update((remapped_keyword,) + card[1:]) # Last piece needed (I think) for synchronizing with the real header # This one is tricky since _relativeinsert calls insert def _relativeinsert(self, card, before=None, after=None, replace=False): keyword = card[0] if self._is_reserved_keyword(keyword): return # Now we have to figure out how to remap 'before' and 'after' if before is None: if isinstance(after, int): remapped_after = self._remap_index(after) else: remapped_after = self._remap_keyword(after) remapped_before = None else: if isinstance(before, int): remapped_before = self._remap_index(before) else: remapped_before = self._remap_keyword(before) remapped_after = None super()._relativeinsert(card, before=before, after=after, replace=replace) remapped_keyword = self._remap_keyword(keyword) card = Card(remapped_keyword, card[1], card[2]) self._table_header._relativeinsert(card, before=remapped_before, after=remapped_after, replace=replace) @classmethod def _is_reserved_keyword(cls, keyword, warn=True): msg = ('Keyword {!r} is reserved for use by the FITS Tiled Image ' 'Convention and will not be stored in the header for the ' 'image being compressed.'.format(keyword)) if keyword == 'TFIELDS': if warn: warnings.warn(msg) return True m = TDEF_RE.match(keyword) if m and m.group('label').upper() in TABLE_KEYWORD_NAMES: if warn: warnings.warn(msg) return True m = cls._zdef_re.match(keyword) if m: label = m.group('label').upper() num = m.group('num') if num is not None and label in cls._indexed_compression_keywords: if warn: warnings.warn(msg) return True elif label in 
cls._compression_keywords: if warn: warnings.warn(msg) return True return False @classmethod def _remap_keyword(cls, keyword): # Given a keyword that one might set on an image, remap that keyword to # the name used for it in the COMPRESSED HDU header # This is mostly just a lookup in _keyword_remaps, but needs handling # for NAXISn keywords is_naxisn = False if keyword[:5] == 'NAXIS': with suppress(ValueError): index = int(keyword[5:]) is_naxisn = index > 0 if is_naxisn: return 'ZNAXIS{}'.format(index) # If the keyword does not need to be remapped then just return the # original keyword return cls._keyword_remaps.get(keyword, keyword) def _remap_index(self, idx): # Given an integer index into this header, map that to the index in the # table header for the same card. If the card doesn't exist in the # table header (generally should *not* be the case) this will just # return the same index # This *does* also accept a keyword or (keyword, repeat) tuple and # obtains the associated numerical index with self._cardindex if not isinstance(idx, int): idx = self._cardindex(idx) keyword, repeat = self._keyword_from_index(idx) remapped_insert_keyword = self._remap_keyword(keyword) with suppress(IndexError, KeyError): idx = self._table_header._cardindex((remapped_insert_keyword, repeat)) return idx # TODO: Fix this class so that it doesn't actually inherit from BinTableHDU, # but instead has an internal BinTableHDU reference class CompImageHDU(BinTableHDU): """ Compressed Image HDU class. """ # Maps deprecated keyword arguments to __init__ to their new names DEPRECATED_KWARGS = { 'compressionType': 'compression_type', 'tileSize': 'tile_size', 'hcompScale': 'hcomp_scale', 'hcompSmooth': 'hcomp_smooth', 'quantizeLevel': 'quantize_level' } _manages_own_heap = True """ The calls to CFITSIO lay out the heap data in memory, and we write it out the same way CFITSIO organizes it. In principle this would break if a user manually changes the underlying compressed data by hand, but there is no reason they would want to do that (and if they do that's their responsibility). """ def __init__(self, data=None, header=None, name=None, compression_type=DEFAULT_COMPRESSION_TYPE, tile_size=None, hcomp_scale=DEFAULT_HCOMP_SCALE, hcomp_smooth=DEFAULT_HCOMP_SMOOTH, quantize_level=DEFAULT_QUANTIZE_LEVEL, quantize_method=DEFAULT_QUANTIZE_METHOD, dither_seed=DEFAULT_DITHER_SEED, do_not_scale_image_data=False, uint=False, scale_back=False, **kwargs): """ Parameters ---------- data : array, optional Uncompressed image data header : Header instance, optional Header to be associated with the image; when reading the HDU from a file (data=DELAYED), the header read from the file name : str, optional The ``EXTNAME`` value; if this value is `None`, then the name from the input image header will be used; if there is no name in the input image header then the default name ``COMPRESSED_IMAGE`` is used. compression_type : str, optional Compression algorithm: one of ``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``, ``'GZIP_2'``, ``'HCOMPRESS_1'`` tile_size : int, optional Compression tile sizes. Default treats each row of image as a tile. 
hcomp_scale : float, optional HCOMPRESS scale parameter hcomp_smooth : float, optional HCOMPRESS smooth parameter quantize_level : float, optional Floating point quantization level; see note below quantize_method : int, optional Floating point quantization dithering method; can be either ``NO_DITHER`` (-1), ``SUBTRACTIVE_DITHER_1`` (1; default), or ``SUBTRACTIVE_DITHER_2`` (2); see note below dither_seed : int, optional Random seed to use for dithering; can be either an integer in the range 1 to 1000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or ``DITHER_SEED_CHECKSUM`` (-1); see note below Notes ----- The astropy.io.fits package supports 2 methods of image compression: 1) The entire FITS file may be externally compressed with the gzip or pkzip utility programs, producing a ``*.gz`` or ``*.zip`` file, respectively. When reading compressed files of this type, Astropy first uncompresses the entire file into a temporary file before performing the requested read operations. The astropy.io.fits package does not support writing to these types of compressed files. This type of compression is supported in the ``_File`` class, not in the `CompImageHDU` class. The file compression type is recognized by the ``.gz`` or ``.zip`` file name extension. 2) The `CompImageHDU` class supports the FITS tiled image compression convention in which the image is subdivided into a grid of rectangular tiles, and each tile of pixels is individually compressed. The details of this FITS compression convention are described at the `FITS Support Office web site <https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_. Basically, the compressed image tiles are stored in rows of a variable length array column in a FITS binary table. The astropy.io.fits recognizes that this binary table extension contains an image and treats it as if it were an image extension. Under this tile-compression format, FITS header keywords remain uncompressed. At this time, Astropy does not support the ability to extract and uncompress sections of the image without having to uncompress the entire image. The astropy.io.fits package supports 3 general-purpose compression algorithms plus one other special-purpose compression technique that is designed for data masks with positive integer pixel values. The 3 general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the special-purpose technique is the IRAF pixel list compression technique (PLIO). The ``compression_type`` parameter defines the compression algorithm to be used. The FITS image can be subdivided into any desired rectangular grid of compression tiles. With the GZIP, Rice, and PLIO algorithms, the default is to take each row of the image as a tile. The HCOMPRESS algorithm is inherently 2-dimensional in nature, so the default in this case is to take 16 rows of the image per tile. In most cases, it makes little difference what tiling pattern is used, so the default tiles are usually adequate. In the case of very small images, it could be more efficient to compress the whole image as a single tile. Note that the image dimensions are not required to be an integer multiple of the tile dimensions; if not, then the tiles at the edges of the image will be smaller than the other tiles. The ``tile_size`` parameter may be provided as a list of tile sizes, one for each dimension in the image. For example a ``tile_size`` value of ``[100,100]`` would divide a 300 X 300 image into 9 100 X 100 tiles. 
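
        For instance, the following sketch (the array contents are arbitrary
        values chosen only for illustration, and the tile shape simply echoes
        the example above) compresses a floating point image using
        100 X 100 pixel tiles:

        >>> import numpy as np                                # doctest: +SKIP
        >>> from astropy.io import fits                       # doctest: +SKIP
        >>> data = np.random.normal(size=(300, 300)).astype('float32')  # doctest: +SKIP
        >>> hdu = fits.CompImageHDU(data, compression_type='RICE_1',
        ...                         tile_size=[100, 100])     # doctest: +SKIP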
The 4 supported image compression algorithms are all 'lossless' when
        applied to integer FITS images; the pixel values are preserved exactly
        with no loss of information during the compression and uncompression
        process.  In addition, the HCOMPRESS algorithm supports a 'lossy'
        compression mode that will produce a larger amount of image
        compression.  This is achieved by specifying a non-zero value for the
        ``hcomp_scale`` parameter.  Since the amount of compression that is
        achieved depends directly on the RMS noise in the image, it is usually
        more convenient to specify the ``hcomp_scale`` factor relative to the
        RMS noise.  Setting ``hcomp_scale = 2.5`` means to use a scale factor
        that is 2.5 times the calculated RMS noise in the image tile.  In some
        cases it may be desirable to specify the exact scaling to be used,
        instead of specifying it relative to the calculated noise value.  This
        may be done by specifying the negative of the desired scale value
        (typically in the range -2 to -100).

        Very high compression factors (of 100 or more) can be achieved by
        using large ``hcomp_scale`` values; however, this can produce
        undesirable 'blocky' artifacts in the compressed image.  A variation
        of the HCOMPRESS algorithm (called HSCOMPRESS) can be used in this
        case to apply a small amount of smoothing of the image when it is
        uncompressed to help cover up these artifacts.  This smoothing is
        purely cosmetic and does not cause any significant change to the image
        pixel values.  Setting the ``hcomp_smooth`` parameter to 1 will engage
        the smoothing algorithm.

        Floating point FITS images (which have ``BITPIX`` = -32 or -64)
        usually contain too much 'noise' in the least significant bits of the
        mantissa of the pixel values to be effectively compressed with any
        lossless algorithm.  Consequently, floating point images are first
        quantized into scaled integer pixel values (and thus throwing away
        much of the noise) before being compressed with the specified
        algorithm (either GZIP, RICE, or HCOMPRESS).  This technique produces
        much higher compression factors than simply using the GZIP utility to
        externally compress the whole FITS file, but it also means that the
        original floating point pixel values are not exactly preserved.  When
        done properly, this integer scaling technique will only discard the
        insignificant noise while still preserving all the real information in
        the image.

        The amount of precision that is retained in the pixel values is
        controlled by the ``quantize_level`` parameter.  Larger values will
        result in compressed images whose pixels more closely match the
        floating point pixel values, but at the same time the amount of
        compression that is achieved will be reduced.  Users should experiment
        with different values for this parameter to determine the optimal
        value that preserves all the useful information in the image, without
        needlessly preserving all the 'noise' which will hurt the compression
        efficiency.

        The default value for the ``quantize_level`` scale factor is 16, which
        means that scaled integer pixel values will be quantized such that the
        difference between adjacent integer values will be 1/16th of the noise
        level in the image background.  An optimized algorithm is used to
        accurately estimate the noise in the image.  As an example, if the RMS
        noise in the background pixels of an image = 32.0, then the spacing
        between adjacent scaled integer pixel values will equal 2.0 by
        default.
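
        As an illustrative sketch (``data`` stands for any floating point
        image array, and the particular levels shown are examples rather than
        recommendations), quantization may be specified relative to the
        per-tile noise or as an absolute spacing:

        >>> hdu = fits.CompImageHDU(data, quantize_level=16.0)   # doctest: +SKIP
        >>> hdu = fits.CompImageHDU(data, quantize_level=-2.0)   # doctest: +SKIP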
Note that the RMS noise is independently calculated for each tile of the image, so the resulting integer scaling factor may fluctuate slightly for each tile. In some cases, it may be desirable to specify the exact quantization level to be used, instead of specifying it relative to the calculated noise value. This may be done by specifying the negative of desired quantization level for the value of ``quantize_level``. In the previous example, one could specify ``quantize_level = -2.0`` so that the quantized integer levels differ by 2.0. Larger negative values for ``quantize_level`` means that the levels are more coarsely-spaced, and will produce higher compression factors. The quantization algorithm can also apply one of two random dithering methods in order to reduce bias in the measured intensity of background regions. The default method, specified with the constant ``SUBTRACTIVE_DITHER_1`` adds dithering to the zero-point of the quantization array itself rather than adding noise to the actual image. The random noise is added on a pixel-by-pixel basis, so in order restore each pixel from its integer value to its floating point value it is necessary to replay the same sequence of random numbers for each pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is exactly like the first except that before dithering any pixel with a floating point value of ``0.0`` is replaced with the special integer value ``-2147483647``. When the image is uncompressed, pixels with this value are restored back to ``0.0`` exactly. Finally, a value of ``NO_DITHER`` disables dithering entirely. As mentioned above, when using the subtractive dithering algorithm it is necessary to be able to generate a (pseudo-)random sequence of noise for each pixel, and replay that same sequence upon decompressing. To facilitate this, a random seed between 1 and 10000 (inclusive) is used to seed a random number generator, and that seed is stored in the ``ZDITHER0`` keyword in the header of the compressed HDU. In order to use that seed to generate the same sequence of random numbers the same random number generator must be used at compression and decompression time; for that reason the tiled image convention provides an implementation of a very simple pseudo-random number generator. The seed itself can be provided in one of three ways, controllable by the ``dither_seed`` argument: It may be specified manually, or it may be generated arbitrarily based on the system's clock (``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method is the default, and is sufficient to ensure that the value is reasonably "arbitrary" and that the same seed is unlikely to be generated sequentially. The checksum method, on the other hand, ensures that the same seed is used every time for a specific image. This is particularly useful for software testing as it ensures that the same image will always use the same seed. """ if not COMPRESSION_SUPPORTED: # TODO: Raise a more specific Exception type raise Exception('The astropy.io.fits.compression module is not ' 'available. 
Creation of compressed image HDUs is ' 'disabled.') compression_type = CMTYPE_ALIASES.get(compression_type, compression_type) # Handle deprecated keyword arguments compression_opts = {} for oldarg, newarg in self.DEPRECATED_KWARGS.items(): if oldarg in kwargs: warnings.warn('Keyword argument {} to {} is pending ' 'deprecation; use {} instead'.format( oldarg, self.__class__.__name__, newarg), AstropyPendingDeprecationWarning) compression_opts[newarg] = kwargs[oldarg] del kwargs[oldarg] else: compression_opts[newarg] = locals()[newarg] # Include newer compression options that don't required backwards # compatibility with deprecated spellings compression_opts['quantize_method'] = quantize_method compression_opts['dither_seed'] = dither_seed if data is DELAYED: # Reading the HDU from a file super().__init__(data=data, header=header) else: # Create at least a skeleton HDU that matches the input # header and data (if any were input) super().__init__(data=None, header=header) # Store the input image data self.data = data # Update the table header (_header) to the compressed # image format and to match the input data (if any); # Create the image header (_image_header) from the input # image header (if any) and ensure it matches the input # data; Create the initially empty table data array to # hold the compressed data. self._update_header_data(header, name, **compression_opts) # TODO: A lot of this should be passed on to an internal image HDU o # something like that, see ticket #88 self._do_not_scale_image_data = do_not_scale_image_data self._uint = uint self._scale_back = scale_back self._axes = [self._header.get('ZNAXIS' + str(axis + 1), 0) for axis in range(self._header.get('ZNAXIS', 0))] # store any scale factors from the table header if do_not_scale_image_data: self._bzero = 0 self._bscale = 1 else: self._bzero = self._header.get('BZERO', 0) self._bscale = self._header.get('BSCALE', 1) self._bitpix = self._header['ZBITPIX'] self._orig_bzero = self._bzero self._orig_bscale = self._bscale self._orig_bitpix = self._bitpix @classmethod def match_header(cls, header): card = header.cards[0] if card.keyword != 'XTENSION': return False xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() if xtension not in ('BINTABLE', 'A3DTABLE'): return False if 'ZIMAGE' not in header or not header['ZIMAGE']: return False if COMPRESSION_SUPPORTED and COMPRESSION_ENABLED: return True elif not COMPRESSION_SUPPORTED: warnings.warn('Failure matching header to a compressed image ' 'HDU: The compression module is not available.\n' 'The HDU will be treated as a Binary Table HDU.', AstropyUserWarning) return False else: # Compression is supported but disabled; just pass silently (#92) return False def _update_header_data(self, image_header, name=None, compression_type=None, tile_size=None, hcomp_scale=None, hcomp_smooth=None, quantize_level=None, quantize_method=None, dither_seed=None): """ Update the table header (`_header`) to the compressed image format and to match the input data (if any). Create the image header (`_image_header`) from the input image header (if any) and ensure it matches the input data. Create the initially-empty table data array to hold the compressed data. This method is mainly called internally, but a user may wish to call this method after assigning new data to the `CompImageHDU` object that is of a different type. 
Parameters ---------- image_header : Header instance header to be associated with the image name : str, optional the ``EXTNAME`` value; if this value is `None`, then the name from the input image header will be used; if there is no name in the input image header then the default name 'COMPRESSED_IMAGE' is used compression_type : str, optional compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2', 'HCOMPRESS_1'; if this value is `None`, use value already in the header; if no value already in the header, use 'RICE_1' tile_size : sequence of int, optional compression tile sizes as a list; if this value is `None`, use value already in the header; if no value already in the header, treat each row of image as a tile hcomp_scale : float, optional HCOMPRESS scale parameter; if this value is `None`, use the value already in the header; if no value already in the header, use 1 hcomp_smooth : float, optional HCOMPRESS smooth parameter; if this value is `None`, use the value already in the header; if no value already in the header, use 0 quantize_level : float, optional floating point quantization level; if this value is `None`, use the value already in the header; if no value already in header, use 16 quantize_method : int, optional floating point quantization dithering method; can be either NO_DITHER (-1), SUBTRACTIVE_DITHER_1 (1; default), or SUBTRACTIVE_DITHER_2 (2) dither_seed : int, optional random seed to use for dithering; can be either an integer in the range 1 to 1000 (inclusive), DITHER_SEED_CLOCK (0; default), or DITHER_SEED_CHECKSUM (-1) """ image_hdu = ImageHDU(data=self.data, header=self._header) self._image_header = CompImageHeader(self._header, image_hdu.header) self._axes = image_hdu._axes del image_hdu # Determine based on the size of the input data whether to use the Q # column format to store compressed data or the P format. # The Q format is used only if the uncompressed data is larger than # 4 GB. This is not a perfect heuristic, as one can contrive an input # array which, when compressed, the entire binary table representing # the compressed data is larger than 4GB. That said, this is the same # heuristic used by CFITSIO, so this should give consistent results. # And the cases where this heuristic is insufficient are extreme and # almost entirely contrived corner cases, so it will do for now if self._has_data: huge_hdu = self.data.nbytes > 2 ** 32 if huge_hdu and not CFITSIO_SUPPORTS_Q_FORMAT: raise OSError( "Astropy cannot compress images greater than 4 GB in size " "({} is {} bytes) without CFITSIO >= 3.35".format( (self.name, self.ver), self.data.nbytes)) else: huge_hdu = False # Update the extension name in the table header if not name and 'EXTNAME' not in self._header: name = 'COMPRESSED_IMAGE' if name: self._header.set('EXTNAME', name, 'name of this binary table extension', after='TFIELDS') self.name = name else: self.name = self._header['EXTNAME'] # Set the compression type in the table header. if compression_type: if compression_type not in COMPRESSION_TYPES: warnings.warn( 'Unknown compression type provided (supported are {}). ' 'Default ({}) compression will be used.' 
.format(', '.join(map(repr, COMPRESSION_TYPES)), DEFAULT_COMPRESSION_TYPE), AstropyUserWarning) compression_type = DEFAULT_COMPRESSION_TYPE self._header.set('ZCMPTYPE', compression_type, 'compression algorithm', after='TFIELDS') else: compression_type = self._header.get('ZCMPTYPE', DEFAULT_COMPRESSION_TYPE) compression_type = CMTYPE_ALIASES.get(compression_type, compression_type) # If the input image header had BSCALE/BZERO cards, then insert # them in the table header. if image_header: bzero = image_header.get('BZERO', 0.0) bscale = image_header.get('BSCALE', 1.0) after_keyword = 'EXTNAME' if bscale != 1.0: self._header.set('BSCALE', bscale, after=after_keyword) after_keyword = 'BSCALE' if bzero != 0.0: self._header.set('BZERO', bzero, after=after_keyword) bitpix_comment = image_header.comments['BITPIX'] naxis_comment = image_header.comments['NAXIS'] else: bitpix_comment = 'data type of original image' naxis_comment = 'dimension of original image' # Set the label for the first column in the table self._header.set('TTYPE1', 'COMPRESSED_DATA', 'label for field 1', after='TFIELDS') # Set the data format for the first column. It is dependent # on the requested compression type. if compression_type == 'PLIO_1': tform1 = '1QI' if huge_hdu else '1PI' else: tform1 = '1QB' if huge_hdu else '1PB' self._header.set('TFORM1', tform1, 'data format of field: variable length array', after='TTYPE1') # Create the first column for the table. This column holds the # compressed data. col1 = Column(name=self._header['TTYPE1'], format=tform1) # Create the additional columns required for floating point # data and calculate the width of the output table. zbitpix = self._image_header['BITPIX'] if zbitpix < 0 and quantize_level != 0.0: # floating point image has 'COMPRESSED_DATA', # 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using # lossless compression, per CFITSIO) ncols = 4 # CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA # store floating point data that couldn't be quantized, instead # of the UNCOMPRESSED_DATA column. There's no way to control # this behavior so the only way to determine which behavior will # be employed is via the CFITSIO version if CFITSIO_SUPPORTS_GZIPDATA: ttype2 = 'GZIP_COMPRESSED_DATA' # The required format for the GZIP_COMPRESSED_DATA is actually # missing from the standard docs, but CFITSIO suggests it # should be 1PB, which is logical. tform2 = '1QB' if huge_hdu else '1PB' else: # Q format is not supported for UNCOMPRESSED_DATA columns. ttype2 = 'UNCOMPRESSED_DATA' if zbitpix == 8: tform2 = '1QB' if huge_hdu else '1PB' elif zbitpix == 16: tform2 = '1QI' if huge_hdu else '1PI' elif zbitpix == 32: tform2 = '1QJ' if huge_hdu else '1PJ' elif zbitpix == -32: tform2 = '1QE' if huge_hdu else '1PE' else: tform2 = '1QD' if huge_hdu else '1PD' # Set up the second column for the table that will hold any # uncompressable data. self._header.set('TTYPE2', ttype2, 'label for field 2', after='TFORM1') self._header.set('TFORM2', tform2, 'data format of field: variable length array', after='TTYPE2') col2 = Column(name=ttype2, format=tform2) # Set up the third column for the table that will hold # the scale values for quantized data. self._header.set('TTYPE3', 'ZSCALE', 'label for field 3', after='TFORM2') self._header.set('TFORM3', '1D', 'data format of field: 8-byte DOUBLE', after='TTYPE3') col3 = Column(name=self._header['TTYPE3'], format=self._header['TFORM3']) # Set up the fourth column for the table that will hold # the zero values for the quantized data. 
self._header.set('TTYPE4', 'ZZERO', 'label for field 4', after='TFORM3') self._header.set('TFORM4', '1D', 'data format of field: 8-byte DOUBLE', after='TTYPE4') after = 'TFORM4' col4 = Column(name=self._header['TTYPE4'], format=self._header['TFORM4']) # Create the ColDefs object for the table cols = ColDefs([col1, col2, col3, col4]) else: # default table has just one 'COMPRESSED_DATA' column ncols = 1 after = 'TFORM1' # remove any header cards for the additional columns that # may be left over from the previous data to_remove = ['TTYPE2', 'TFORM2', 'TTYPE3', 'TFORM3', 'TTYPE4', 'TFORM4'] for k in to_remove: try: del self._header[k] except KeyError: pass # Create the ColDefs object for the table cols = ColDefs([col1]) # Update the table header with the width of the table, the # number of fields in the table, the indicator for a compressed # image HDU, the data type of the image data and the number of # dimensions in the image data array. self._header.set('NAXIS1', cols.dtype.itemsize, 'width of table in bytes') self._header.set('TFIELDS', ncols, 'number of fields in each row', after='GCOUNT') self._header.set('ZIMAGE', True, 'extension contains compressed image', after=after) self._header.set('ZBITPIX', zbitpix, bitpix_comment, after='ZIMAGE') self._header.set('ZNAXIS', self._image_header['NAXIS'], naxis_comment, after='ZBITPIX') # Strip the table header of all the ZNAZISn and ZTILEn keywords # that may be left over from the previous data for idx in itertools.count(1): try: del self._header['ZNAXIS' + str(idx)] del self._header['ZTILE' + str(idx)] except KeyError: break # Verify that any input tile size parameter is the appropriate # size to match the HDU's data. naxis = self._image_header['NAXIS'] if not tile_size: tile_size = [] elif len(tile_size) != naxis: warnings.warn('Provided tile size not appropriate for the data. ' 'Default tile size will be used.', AstropyUserWarning) tile_size = [] # Set default tile dimensions for HCOMPRESS_1 if compression_type == 'HCOMPRESS_1': if (self._image_header['NAXIS1'] < 4 or self._image_header['NAXIS2'] < 4): raise ValueError('Hcompress minimum image dimension is ' '4 pixels') elif tile_size: if tile_size[0] < 4 or tile_size[1] < 4: # user specified tile size is too small raise ValueError('Hcompress minimum tile dimension is ' '4 pixels') major_dims = len([ts for ts in tile_size if ts > 1]) if major_dims > 2: raise ValueError( 'HCOMPRESS can only support 2-dimensional tile sizes.' 'All but two of the tile_size dimensions must be set ' 'to 1.') if tile_size and (tile_size[0] == 0 and tile_size[1] == 0): # compress the whole image as a single tile tile_size[0] = self._image_header['NAXIS1'] tile_size[1] = self._image_header['NAXIS2'] for i in range(2, naxis): # set all higher tile dimensions = 1 tile_size[i] = 1 elif not tile_size: # The Hcompress algorithm is inherently 2D in nature, so the # row by row tiling that is used for other compression # algorithms is not appropriate. If the image has less than 30 # rows, then the entire image will be compressed as a single # tile. Otherwise the tiles will consist of 16 rows of the # image. This keeps the tiles to a reasonable size, and it # also includes enough rows to allow good compression # efficiency. It the last tile of the image happens to contain # less than 4 rows, then find another tile size with between 14 # and 30 rows (preferably even), so that the last tile has at # least 4 rows. 
# 1st tile dimension is the row length of the image tile_size.append(self._image_header['NAXIS1']) if self._image_header['NAXIS2'] <= 30: tile_size.append(self._image_header['NAXIS1']) else: # look for another good tile dimension naxis2 = self._image_header['NAXIS2'] for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]: if naxis2 % dim == 0 or naxis2 % dim > 3: tile_size.append(dim) break else: tile_size.append(17) for i in range(2, naxis): # set all higher tile dimensions = 1 tile_size.append(1) # check if requested tile size causes the last tile to have # less than 4 pixels remain = self._image_header['NAXIS1'] % tile_size[0] # 1st dimen if remain > 0 and remain < 4: tile_size[0] += 1 # try increasing tile size by 1 remain = self._image_header['NAXIS1'] % tile_size[0] if remain > 0 and remain < 4: raise ValueError('Last tile along 1st dimension has ' 'less than 4 pixels') remain = self._image_header['NAXIS2'] % tile_size[1] # 2nd dimen if remain > 0 and remain < 4: tile_size[1] += 1 # try increasing tile size by 1 remain = self._image_header['NAXIS2'] % tile_size[1] if remain > 0 and remain < 4: raise ValueError('Last tile along 2nd dimension has ' 'less than 4 pixels') # Set up locations for writing the next cards in the header. last_znaxis = 'ZNAXIS' if self._image_header['NAXIS'] > 0: after1 = 'ZNAXIS1' else: after1 = 'ZNAXIS' # Calculate the number of rows in the output table and # write the ZNAXISn and ZTILEn cards to the table header. nrows = 0 for idx, axis in enumerate(self._axes): naxis = 'NAXIS' + str(idx + 1) znaxis = 'ZNAXIS' + str(idx + 1) ztile = 'ZTILE' + str(idx + 1) if tile_size and len(tile_size) >= idx + 1: ts = tile_size[idx] else: if ztile not in self._header: # Default tile size if not idx: ts = self._image_header['NAXIS1'] else: ts = 1 else: ts = self._header[ztile] tile_size.append(ts) if not nrows: nrows = (axis - 1) // ts + 1 else: nrows *= ((axis - 1) // ts + 1) if image_header and naxis in image_header: self._header.set(znaxis, axis, image_header.comments[naxis], after=last_znaxis) else: self._header.set(znaxis, axis, 'length of original image axis', after=last_znaxis) self._header.set(ztile, ts, 'size of tiles to be compressed', after=after1) last_znaxis = znaxis after1 = ztile # Set the NAXIS2 header card in the table hdu to the number of # rows in the table. self._header.set('NAXIS2', nrows, 'number of rows in table') self.columns = cols # Set the compression parameters in the table header. # First, setup the values to be used for the compression parameters # in case none were passed in. This will be either the value # already in the table header for that parameter or the default # value. 
for idx in itertools.count(1): zname = 'ZNAME' + str(idx) if zname not in self._header: break zval = 'ZVAL' + str(idx) if self._header[zname] == 'NOISEBIT': if quantize_level is None: quantize_level = self._header[zval] if self._header[zname] == 'SCALE ': if hcomp_scale is None: hcomp_scale = self._header[zval] if self._header[zname] == 'SMOOTH ': if hcomp_smooth is None: hcomp_smooth = self._header[zval] if quantize_level is None: quantize_level = DEFAULT_QUANTIZE_LEVEL if hcomp_scale is None: hcomp_scale = DEFAULT_HCOMP_SCALE if hcomp_smooth is None: hcomp_smooth = DEFAULT_HCOMP_SCALE # Next, strip the table header of all the ZNAMEn and ZVALn keywords # that may be left over from the previous data for idx in itertools.count(1): zname = 'ZNAME' + str(idx) if zname not in self._header: break zval = 'ZVAL' + str(idx) del self._header[zname] del self._header[zval] # Finally, put the appropriate keywords back based on the # compression type. after_keyword = 'ZCMPTYPE' idx = 1 if compression_type == 'RICE_1': self._header.set('ZNAME1', 'BLOCKSIZE', 'compression block size', after=after_keyword) self._header.set('ZVAL1', DEFAULT_BLOCK_SIZE, 'pixels per block', after='ZNAME1') self._header.set('ZNAME2', 'BYTEPIX', 'bytes per pixel (1, 2, 4, or 8)', after='ZVAL1') if self._header['ZBITPIX'] == 8: bytepix = 1 elif self._header['ZBITPIX'] == 16: bytepix = 2 else: bytepix = DEFAULT_BYTE_PIX self._header.set('ZVAL2', bytepix, 'bytes per pixel (1, 2, 4, or 8)', after='ZNAME2') after_keyword = 'ZVAL2' idx = 3 elif compression_type == 'HCOMPRESS_1': self._header.set('ZNAME1', 'SCALE', 'HCOMPRESS scale factor', after=after_keyword) self._header.set('ZVAL1', hcomp_scale, 'HCOMPRESS scale factor', after='ZNAME1') self._header.set('ZNAME2', 'SMOOTH', 'HCOMPRESS smooth option', after='ZVAL1') self._header.set('ZVAL2', hcomp_smooth, 'HCOMPRESS smooth option', after='ZNAME2') after_keyword = 'ZVAL2' idx = 3 if self._image_header['BITPIX'] < 0: # floating point image self._header.set('ZNAME' + str(idx), 'NOISEBIT', 'floating point quantization level', after=after_keyword) self._header.set('ZVAL' + str(idx), quantize_level, 'floating point quantization level', after='ZNAME' + str(idx)) # Add the dither method and seed if quantize_method: if quantize_method not in [NO_DITHER, SUBTRACTIVE_DITHER_1, SUBTRACTIVE_DITHER_2]: name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD] warnings.warn('Unknown quantization method provided. 
' 'Default method ({}) used.'.format(name)) quantize_method = DEFAULT_QUANTIZE_METHOD if quantize_method == NO_DITHER: zquantiz_comment = 'No dithering during quantization' else: zquantiz_comment = 'Pixel Quantization Algorithm' self._header.set('ZQUANTIZ', QUANTIZE_METHOD_NAMES[quantize_method], zquantiz_comment, after='ZVAL' + str(idx)) else: # If the ZQUANTIZ keyword is missing the default is to assume # no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD # is set to quantize_method = self._header.get('ZQUANTIZ', NO_DITHER) if isinstance(quantize_method, str): for k, v in QUANTIZE_METHOD_NAMES.items(): if v.upper() == quantize_method: quantize_method = k break else: quantize_method = NO_DITHER if quantize_method == NO_DITHER: if 'ZDITHER0' in self._header: # If dithering isn't being used then there's no reason to # keep the ZDITHER0 keyword del self._header['ZDITHER0'] else: if dither_seed: dither_seed = self._generate_dither_seed(dither_seed) elif 'ZDITHER0' in self._header: dither_seed = self._header['ZDITHER0'] else: dither_seed = self._generate_dither_seed( DEFAULT_DITHER_SEED) self._header.set('ZDITHER0', dither_seed, 'dithering offset when quantizing floats', after='ZQUANTIZ') if image_header: # Move SIMPLE card from the image header to the # table header as ZSIMPLE card. if 'SIMPLE' in image_header: self._header.set('ZSIMPLE', image_header['SIMPLE'], image_header.comments['SIMPLE'], before='ZBITPIX') # Move EXTEND card from the image header to the # table header as ZEXTEND card. if 'EXTEND' in image_header: self._header.set('ZEXTEND', image_header['EXTEND'], image_header.comments['EXTEND']) # Move BLOCKED card from the image header to the # table header as ZBLOCKED card. if 'BLOCKED' in image_header: self._header.set('ZBLOCKED', image_header['BLOCKED'], image_header.comments['BLOCKED']) # Move XTENSION card from the image header to the # table header as ZTENSION card. # Since we only handle compressed IMAGEs, ZTENSION should # always be IMAGE, even if the caller has passed in a header # for some other type of extension. if 'XTENSION' in image_header: self._header.set('ZTENSION', 'IMAGE', image_header.comments['XTENSION'], before='ZBITPIX') # Move PCOUNT and GCOUNT cards from image header to the table # header as ZPCOUNT and ZGCOUNT cards. if 'PCOUNT' in image_header: self._header.set('ZPCOUNT', image_header['PCOUNT'], image_header.comments['PCOUNT'], after=last_znaxis) if 'GCOUNT' in image_header: self._header.set('ZGCOUNT', image_header['GCOUNT'], image_header.comments['GCOUNT'], after='ZPCOUNT') # Move CHECKSUM and DATASUM cards from the image header to the # table header as XHECKSUM and XDATASUM cards. if 'CHECKSUM' in image_header: self._header.set('ZHECKSUM', image_header['CHECKSUM'], image_header.comments['CHECKSUM']) if 'DATASUM' in image_header: self._header.set('ZDATASUM', image_header['DATASUM'], image_header.comments['DATASUM']) else: # Move XTENSION card from the image header to the # table header as ZTENSION card. # Since we only handle compressed IMAGEs, ZTENSION should # always be IMAGE, even if the caller has passed in a header # for some other type of extension. if 'XTENSION' in self._image_header: self._header.set('ZTENSION', 'IMAGE', self._image_header.comments['XTENSION'], before='ZBITPIX') # Move PCOUNT and GCOUNT cards from image header to the table # header as ZPCOUNT and ZGCOUNT cards. 
if 'PCOUNT' in self._image_header: self._header.set('ZPCOUNT', self._image_header['PCOUNT'], self._image_header.comments['PCOUNT'], after=last_znaxis) if 'GCOUNT' in self._image_header: self._header.set('ZGCOUNT', self._image_header['GCOUNT'], self._image_header.comments['GCOUNT'], after='ZPCOUNT') # When we have an image checksum we need to ensure that the same # number of blank cards exist in the table header as there were in # the image header. This allows those blank cards to be carried # over to the image header when the hdu is uncompressed. if 'ZHECKSUM' in self._header: required_blanks = image_header._countblanks() image_blanks = self._image_header._countblanks() table_blanks = self._header._countblanks() for _ in range(required_blanks - image_blanks): self._image_header.append() table_blanks += 1 for _ in range(required_blanks - table_blanks): self._header.append() @lazyproperty def data(self): # The data attribute is the image data (not the table data). data = compression.decompress_hdu(self) if data is None: return data # Scale the data if necessary if (self._orig_bzero != 0 or self._orig_bscale != 1): new_dtype = self._dtype_for_bitpix() data = np.array(data, dtype=new_dtype) zblank = None if 'ZBLANK' in self.compressed_data.columns.names: zblank = self.compressed_data['ZBLANK'] else: if 'ZBLANK' in self._header: zblank = np.array(self._header['ZBLANK'], dtype='int32') elif 'BLANK' in self._header: zblank = np.array(self._header['BLANK'], dtype='int32') if zblank is not None: blanks = (data == zblank) if self._bscale != 1: np.multiply(data, self._bscale, data) if self._bzero != 0: # We have to explcitly cast self._bzero to prevent numpy from # raising an error when doing self.data += self._bzero, and we # do this instead of self.data = self.data + self._bzero to # avoid doubling memory usage. np.add(data, self._bzero, out=data, casting='unsafe') if zblank is not None: data = np.where(blanks, np.nan, data) # Right out of _ImageBaseHDU.data self._update_header_scale_info(data.dtype) return data @data.setter def data(self, data): if (data is not None) and (not isinstance(data, np.ndarray) or data.dtype.fields is not None): raise TypeError('CompImageHDU data has incorrect type:{}; ' 'dtype.fields = {}'.format( type(data), data.dtype.fields)) @lazyproperty def compressed_data(self): # First we will get the table data (the compressed # data) from the file, if there is any. 
compressed_data = super().data if isinstance(compressed_data, np.rec.recarray): # Make sure not to use 'del self.data' so we don't accidentally # go through the self.data.fdel and close the mmap underlying # the compressed_data array del self.__dict__['data'] return compressed_data else: # This will actually set self.compressed_data with the # pre-allocated space for the compression data; this is something I # might do away with in the future self._update_compressed_data() return self.compressed_data @compressed_data.deleter def compressed_data(self): # Deleting the compressed_data attribute has to be handled # with a little care to prevent a reference leak # First delete the ._coldefs attributes under it to break a possible # reference cycle if 'compressed_data' in self.__dict__: del self.__dict__['compressed_data']._coldefs # Now go ahead and delete from self.__dict__; normally # lazyproperty.__delete__ does this for us, but we can prempt it to # do some additional cleanup del self.__dict__['compressed_data'] # If this file was mmap'd, numpy.memmap will hold open a file # handle until the underlying mmap object is garbage-collected; # since this reference leak can sometimes hang around longer than # welcome go ahead and force a garbage collection gc.collect() @property def shape(self): """ Shape of the image array--should be equivalent to ``self.data.shape``. """ # Determine from the values read from the header return tuple(reversed(self._axes)) @lazyproperty def header(self): # The header attribute is the header for the image data. It # is not actually stored in the object dictionary. Instead, # the _image_header is stored. If the _image_header attribute # has already been defined we just return it. If not, we must # create it from the table header (the _header attribute). if hasattr(self, '_image_header'): return self._image_header # Start with a copy of the table header. image_header = self._header.copy() # Delete cards that are related to the table. And move # the values of those cards that relate to the image from # their corresponding table cards. These include # ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn. 
# (Note: Used set here instead of list in case there are any duplicate # keywords, which there may be in some pathological cases: # https://github.com/astropy/astropy/issues/2750 for keyword in set(image_header): if CompImageHeader._is_reserved_keyword(keyword, warn=False): del image_header[keyword] if 'ZSIMPLE' in self._header: image_header.set('SIMPLE', self._header['ZSIMPLE'], self._header.comments['ZSIMPLE'], before=0) elif 'ZTENSION' in self._header: if self._header['ZTENSION'] != 'IMAGE': warnings.warn("ZTENSION keyword in compressed " "extension != 'IMAGE'", AstropyUserWarning) image_header.set('XTENSION', 'IMAGE', self._header.comments['ZTENSION'], before=0) else: image_header.set('XTENSION', 'IMAGE', before=0) image_header.set('BITPIX', self._header['ZBITPIX'], self._header.comments['ZBITPIX'], before=1) image_header.set('NAXIS', self._header['ZNAXIS'], self._header.comments['ZNAXIS'], before=2) last_naxis = 'NAXIS' for idx in range(image_header['NAXIS']): znaxis = 'ZNAXIS' + str(idx + 1) naxis = znaxis[1:] image_header.set(naxis, self._header[znaxis], self._header.comments[znaxis], after=last_naxis) last_naxis = naxis # Delete any other spurious NAXISn keywords: naxis = image_header['NAXIS'] for keyword in list(image_header['NAXIS?*']): try: n = int(keyword[5:]) except Exception: continue if n > naxis: del image_header[keyword] # Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs, # ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs # their values are always 0 and 1 respectively if 'ZPCOUNT' in self._header: image_header.set('PCOUNT', self._header['ZPCOUNT'], self._header.comments['ZPCOUNT'], after=last_naxis) else: image_header.set('PCOUNT', 0, after=last_naxis) if 'ZGCOUNT' in self._header: image_header.set('GCOUNT', self._header['ZGCOUNT'], self._header.comments['ZGCOUNT'], after='PCOUNT') else: image_header.set('GCOUNT', 1, after='PCOUNT') if 'ZEXTEND' in self._header: image_header.set('EXTEND', self._header['ZEXTEND'], self._header.comments['ZEXTEND']) if 'ZBLOCKED' in self._header: image_header.set('BLOCKED', self._header['ZBLOCKED'], self._header.comments['ZBLOCKED']) # Move the ZHECKSUM and ZDATASUM cards to the image header # as CHECKSUM and DATASUM if 'ZHECKSUM' in self._header: image_header.set('CHECKSUM', self._header['ZHECKSUM'], self._header.comments['ZHECKSUM']) if 'ZDATASUM' in self._header: image_header.set('DATASUM', self._header['ZDATASUM'], self._header.comments['ZDATASUM']) # Remove the EXTNAME card if the value in the table header # is the default value of COMPRESSED_IMAGE. if ('EXTNAME' in self._header and self._header['EXTNAME'] == 'COMPRESSED_IMAGE'): del image_header['EXTNAME'] # Look to see if there are any blank cards in the table # header. If there are, there should be the same number # of blank cards in the image header. Add blank cards to # the image header to make it so. table_blanks = self._header._countblanks() image_blanks = image_header._countblanks() for _ in range(table_blanks - image_blanks): image_header.append() # Create the CompImageHeader that syncs with the table header, and save # it off to self._image_header so it can be referenced later # unambiguously self._image_header = CompImageHeader(self._header, image_header) return self._image_header def _summary(self): """ Summarize the HDU: name, dimensions, and formats. """ class_name = self.__class__.__name__ # if data is touched, use data info. 
if self._data_loaded: if self.data is None: _shape, _format = (), '' else: # the shape will be in the order of NAXIS's which is the # reverse of the numarray shape _shape = list(self.data.shape) _format = self.data.dtype.name _shape.reverse() _shape = tuple(_shape) _format = _format[_format.rfind('.') + 1:] # if data is not touched yet, use header info. else: _shape = () for idx in range(self.header['NAXIS']): _shape += (self.header['NAXIS' + str(idx + 1)],) _format = BITPIX2DTYPE[self.header['BITPIX']] return (self.name, self.ver, class_name, len(self.header), _shape, _format) def _update_compressed_data(self): """ Compress the image data so that it may be written to a file. """ # Check to see that the image_header matches the image data image_bitpix = DTYPE2BITPIX[self.data.dtype.name] if image_bitpix != self._orig_bitpix or self.data.shape != self.shape: self._update_header_data(self.header) # TODO: This is copied right out of _ImageBaseHDU._writedata_internal; # it would be cool if we could use an internal ImageHDU and use that to # write to a buffer for compression or something. See ticket #88 # deal with unsigned integer 16, 32 and 64 data old_data = self.data if _is_pseudo_unsigned(self.data.dtype): # Convert the unsigned array to signed self.data = np.array( self.data - _unsigned_zero(self.data.dtype), dtype='=i{}'.format(self.data.dtype.itemsize)) should_swap = False else: should_swap = not self.data.dtype.isnative if should_swap: if self.data.flags.writeable: self.data.byteswap(True) else: # For read-only arrays, there is no way around making # a byteswapped copy of the data. self.data = self.data.byteswap(False) try: nrows = self._header['NAXIS2'] tbsize = self._header['NAXIS1'] * nrows self._header['PCOUNT'] = 0 if 'THEAP' in self._header: del self._header['THEAP'] self._theap = tbsize # First delete the original compressed data, if it exists del self.compressed_data # Compress the data. # The current implementation of compress_hdu assumes the empty # compressed data table has already been initialized in # self.compressed_data, and writes directly to it # compress_hdu returns the size of the heap for the written # compressed image table heapsize, self.compressed_data = compression.compress_hdu(self) finally: # if data was byteswapped return it to its original order if should_swap: self.data.byteswap(True) self.data = old_data # CFITSIO will write the compressed data in big-endian order dtype = self.columns.dtype.newbyteorder('>') buf = self.compressed_data compressed_data = buf[:self._theap].view(dtype=dtype, type=np.rec.recarray) self.compressed_data = compressed_data.view(FITS_rec) self.compressed_data._coldefs = self.columns self.compressed_data._heapoffset = self._theap self.compressed_data._heapsize = heapsize def scale(self, type=None, option='old', bscale=1, bzero=0): """ Scale image data by using ``BSCALE`` and ``BZERO``. Calling this method will scale ``self.data`` and update the keywords of ``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``. This method should only be used right before writing to the output file, as the data will be scaled and is therefore not very usable after the call. Parameters ---------- type : str, optional destination data type, use a string representing a numpy dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If is `None`, use the current data type. option : str, optional how to scale the data: if ``"old"``, use the original ``BSCALE`` and ``BZERO`` values when the data was read/created. 
If ``"minmax"``, use the minimum and maximum of the data to scale. The option will be overwritten by any user-specified bscale/bzero values. bscale, bzero : int, optional user specified ``BSCALE`` and ``BZERO`` values. """ if self.data is None: return # Determine the destination (numpy) data type if type is None: type = BITPIX2DTYPE[self._bitpix] _type = getattr(np, type) # Determine how to scale the data # bscale and bzero takes priority if (bscale != 1 or bzero != 0): _scale = bscale _zero = bzero else: if option == 'old': _scale = self._orig_bscale _zero = self._orig_bzero elif option == 'minmax': if isinstance(_type, np.floating): _scale = 1 _zero = 0 else: _min = np.minimum.reduce(self.data.flat) _max = np.maximum.reduce(self.data.flat) if _type == np.uint8: # uint8 case _zero = _min _scale = (_max - _min) / (2. ** 8 - 1) else: _zero = (_max + _min) / 2. # throw away -2^N _scale = (_max - _min) / (2. ** (8 * _type.bytes) - 2) # Do the scaling if _zero != 0: # We have to explicitly cast self._bzero to prevent numpy from # raising an error when doing self.data -= _zero, and we # do this instead of self.data = self.data - _zero to # avoid doubling memory usage. np.subtract(self.data, _zero, out=self.data, casting='unsafe') self.header['BZERO'] = _zero else: # Delete from both headers for header in (self.header, self._header): with suppress(KeyError): del header['BZERO'] if _scale != 1: self.data /= _scale self.header['BSCALE'] = _scale else: for header in (self.header, self._header): with suppress(KeyError): del header['BSCALE'] if self.data.dtype.type != _type: self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1 # Update the BITPIX Card to match the data self._bitpix = DTYPE2BITPIX[self.data.dtype.name] self._bzero = self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1) # Update BITPIX for the image header specifically # TODO: Make this more clear by using self._image_header, but only once # this has been fixed so that the _image_header attribute is guaranteed # to be valid self.header['BITPIX'] = self._bitpix # Update the table header to match the scaled data self._update_header_data(self.header) # Since the image has been manually scaled, the current # bitpix/bzero/bscale now serve as the 'original' scaling of the image, # as though the original image has been completely replaced self._orig_bitpix = self._bitpix self._orig_bzero = self._bzero self._orig_bscale = self._bscale def _prewriteto(self, checksum=False, inplace=False): if self._scale_back: self.scale(BITPIX2DTYPE[self._orig_bitpix]) if self._has_data: self._update_compressed_data() # Use methods in the superclass to update the header with # scale/checksum keywords based on the data type of the image data self._update_uint_scale_keywords() # Shove the image header and data into a new ImageHDU and use that # to compute the image checksum image_hdu = ImageHDU(data=self.data, header=self.header) image_hdu._update_checksum(checksum) if 'CHECKSUM' in image_hdu.header: # This will also pass through to the ZHECKSUM keyword and # ZDATASUM keyword self._image_header.set('CHECKSUM', image_hdu.header['CHECKSUM'], image_hdu.header.comments['CHECKSUM']) if 'DATASUM' in image_hdu.header: self._image_header.set('DATASUM', image_hdu.header['DATASUM'], image_hdu.header.comments['DATASUM']) # Store a temporary backup of self.data in a different attribute; # see below self._imagedata = self.data # Now we need to perform an ugly hack to set the compressed data as # the .data attribute on the HDU so that the call to 
_writedata # handles it properly self.__dict__['data'] = self.compressed_data return super()._prewriteto(checksum=checksum, inplace=inplace) def _writeheader(self, fileobj): """ Bypasses `BinTableHDU._writeheader()` which updates the header with metadata about the data that is meaningless here; another reason why this class maybe shouldn't inherit directly from BinTableHDU... """ return ExtensionHDU._writeheader(self, fileobj) def _writedata(self, fileobj): """ Wrap the basic ``_writedata`` method to restore the ``.data`` attribute to the uncompressed image data in the case of an exception. """ try: return super()._writedata(fileobj) finally: # Restore the .data attribute to its rightful value (if any) if hasattr(self, '_imagedata'): self.__dict__['data'] = self._imagedata del self._imagedata else: del self.data def _close(self, closed=True): super()._close(closed=closed) # Also make sure to close access to the compressed data mmaps if (closed and self._data_loaded and _get_array_mmap(self.compressed_data) is not None): del self.compressed_data # TODO: This was copied right out of _ImageBaseHDU; get rid of it once we # find a way to rewrite this class as either a subclass or wrapper for an # ImageHDU def _dtype_for_bitpix(self): """ Determine the dtype that the data should be converted to depending on the BITPIX value in the header, and possibly on the BSCALE value as well. Returns None if there should not be any change. """ bitpix = self._orig_bitpix # Handle possible conversion to uints if enabled if self._uint and self._orig_bscale == 1: for bits, dtype in ((16, np.dtype('uint16')), (32, np.dtype('uint32')), (64, np.dtype('uint64'))): if bitpix == bits and self._orig_bzero == 1 << (bits - 1): return dtype if bitpix > 16: # scale integers to Float64 return np.dtype('float64') elif bitpix > 0: # scale integers to Float32 return np.dtype('float32') def _update_header_scale_info(self, dtype=None): if (not self._do_not_scale_image_data and not (self._orig_bzero == 0 and self._orig_bscale == 1)): for keyword in ['BSCALE', 'BZERO']: # Make sure to delete from both the image header and the table # header; later this will be streamlined for header in (self.header, self._header): with suppress(KeyError): del header[keyword] # Since _update_header_scale_info can, currently, be # called *after* _prewriteto(), replace these with # blank cards so the header size doesn't change header.append() if dtype is None: dtype = self._dtype_for_bitpix() if dtype is not None: self.header['BITPIX'] = DTYPE2BITPIX[dtype.name] self._bzero = 0 self._bscale = 1 self._bitpix = self.header['BITPIX'] def _generate_dither_seed(self, seed): if not _is_int(seed): raise TypeError("Seed must be an integer") if not -1 <= seed <= 10000: raise ValueError( "Seed for random dithering must be either between 1 and " "10000 inclusive, 0 for autogeneration from the system " "clock, or -1 for autogeneration from a checksum of the first " "image tile (got {})".format(seed)) if seed == DITHER_SEED_CHECKSUM: # Determine the tile dimensions from the ZTILEn keywords naxis = self._header['ZNAXIS'] tile_dims = [self._header['ZTILE{}'.format(idx + 1)] for idx in range(naxis)] tile_dims.reverse() # Get the first tile by using the tile dimensions as the end # indices of slices (starting from 0) first_tile = self.data[tuple(slice(d) for d in tile_dims)] # The checksum algorithm used is literally just the sum of the bytes # of the tile data (not its actual floating point values). Integer # overflow is irrelevant. 
csum = first_tile.view(dtype='uint8').sum() # Since CFITSIO uses an unsigned long (which may be different on # different platforms) go ahead and truncate the sum to its # unsigned long value and take the result modulo 10000 return (ctypes.c_ulong(csum).value % 10000) + 1 elif seed == DITHER_SEED_CLOCK: # This isn't exactly the same algorithm as CFITSIO, but that's okay # since the result is meant to be arbitrary. The primary difference # is that CFITSIO incorporates the HDU number into the result in # the hopes of heading off the possibility of the same seed being # generated for two HDUs at the same time. Here instead we just # add in the HDU object's id return ((sum(int(x) for x in math.modf(time.time())) + id(self)) % 10000) + 1 else: return seed
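# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): deriving a dither seed
# from a checksum of the first tile, mirroring the DITHER_SEED_CHECKSUM
# branch of _generate_dither_seed. The helper name and the tile used here
# are hypothetical examples, not library API.
import ctypes

import numpy as np


def seed_from_first_tile(first_tile):
    """Return a dither seed in 1..10000 from the raw bytes of a tile."""
    # Sum the raw bytes of the tile (not its floating point values);
    # integer overflow is irrelevant for this purpose.
    csum = first_tile.view(dtype='uint8').sum()
    # Truncate to an unsigned long, as CFITSIO does, then map into 1..10000.
    return (ctypes.c_ulong(int(csum)).value % 10000) + 1


# Example with a small hypothetical 4x4 float32 tile:
tile = np.arange(16, dtype='float32').reshape(4, 4)
assert 1 <= seed_from_first_tile(tile) <= 10000
# ---------------------------------------------------------------------------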
# Licensed under a 3-clause BSD style license - see PYFITS.rst import datetime import os import sys import warnings from contextlib import suppress from inspect import signature, Parameter import numpy as np from .. import conf from ..file import _File from ..header import Header, _pad_length from ..util import (_is_int, _is_pseudo_unsigned, _unsigned_zero, itersubclasses, decode_ascii, _get_array_mmap, first, _free_space_check, _extract_number) from ..verify import _Verify, _ErrList from ....utils import lazyproperty from ....utils.exceptions import AstropyUserWarning from ....utils.decorators import deprecated_renamed_argument class _Delayed: pass DELAYED = _Delayed() BITPIX2DTYPE = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64', -32: 'float32', -64: 'float64'} """Maps FITS BITPIX values to Numpy dtype names.""" DTYPE2BITPIX = {'uint8': 8, 'int16': 16, 'uint16': 16, 'int32': 32, 'uint32': 32, 'int64': 64, 'uint64': 64, 'float32': -32, 'float64': -64} """ Maps Numpy dtype names to FITS BITPIX values (this includes unsigned integers, with the assumption that the pseudo-unsigned integer convention will be used in this case. """ class InvalidHDUException(Exception): """ A custom exception class used mainly to signal to _BaseHDU.__new__ that an HDU cannot possibly be considered valid, and must be assumed to be corrupted. """ def _hdu_class_from_header(cls, header): """ Used primarily by _BaseHDU.__new__ to find an appropriate HDU class to use based on values in the header. See the _BaseHDU.__new__ docstring. """ klass = cls # By default, if no subclasses are defined if header: for c in reversed(list(itersubclasses(cls))): try: # HDU classes built into astropy.io.fits are always considered, # but extension HDUs must be explicitly registered if not (c.__module__.startswith('astropy.io.fits.') or c in cls._hdu_registry): continue if c.match_header(header): klass = c break except NotImplementedError: continue except Exception as exc: warnings.warn( 'An exception occurred matching an HDU header to the ' 'appropriate HDU type: {0}'.format(exc), AstropyUserWarning) warnings.warn('The HDU will be treated as corrupted.', AstropyUserWarning) klass = _CorruptedHDU del exc break return klass class _BaseHDUMeta(type): def __init__(cls, name, bases, members): # The sole purpose of this metaclass right now is to add the same # data.deleter to all HDUs with a data property. 
# It's unfortunate, but there's otherwise no straightforward way # that a property can inherit setters/deleters of the property of the # same name on base classes if 'data' in members: data_prop = members['data'] if (isinstance(data_prop, (lazyproperty, property)) and data_prop.fdel is None): # Don't do anything if the class has already explicitly # set the deleter for its data property def data(self): # The deleter if self._file is not None and self._data_loaded: data_refcount = sys.getrefcount(self.data) # Manually delete *now* so that FITS_rec.__del__ # cleanup can happen if applicable del self.__dict__['data'] # Don't even do this unless the *only* reference to the # .data array was the one we're deleting by deleting # this attribute; if any other references to the array # are hanging around (perhaps the user ran ``data = # hdu.data``) don't even consider this: if data_refcount == 2: self._file._maybe_close_mmap() setattr(cls, 'data', data_prop.deleter(data)) # TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that # matter) class _BaseHDU(metaclass=_BaseHDUMeta): """Base class for all HDU (header data unit) classes.""" _hdu_registry = set() # This HDU type is part of the FITS standard _standard = True # Byte to use for padding out blocks _padding_byte = '\x00' _default_name = '' def __new__(cls, data=None, header=None, *args, **kwargs): """ Iterates through the subclasses of _BaseHDU and uses that class's match_header() method to determine which subclass to instantiate. It's important to be aware that the class hierarchy is traversed in a depth-last order. Each match_header() should identify an HDU type as uniquely as possible. Abstract types may choose to simply return False or raise NotImplementedError to be skipped. If any unexpected exceptions are raised while evaluating match_header(), the type is taken to be _CorruptedHDU. 
""" klass = _hdu_class_from_header(cls, header) return super().__new__(klass) def __init__(self, data=None, header=None, *args, **kwargs): if header is None: header = Header() self._header = header self._file = None self._buffer = None self._header_offset = None self._data_offset = None self._data_size = None # This internal variable is used to track whether the data attribute # still points to the same data array as when the HDU was originally # created (this does not track whether the data is actually the same # content-wise) self._data_replaced = False self._data_needs_rescale = False self._new = True self._output_checksum = False if 'DATASUM' in self._header and 'CHECKSUM' not in self._header: self._output_checksum = 'datasum' elif 'CHECKSUM' in self._header: self._output_checksum = True @property def header(self): return self._header @header.setter def header(self, value): self._header = value @property def name(self): # Convert the value to a string to be flexible in some pathological # cases (see ticket #96) return str(self._header.get('EXTNAME', self._default_name)) @name.setter def name(self, value): if not isinstance(value, str): raise TypeError("'name' attribute must be a string") if not conf.extension_name_case_sensitive: value = value.upper() if 'EXTNAME' in self._header: self._header['EXTNAME'] = value else: self._header['EXTNAME'] = (value, 'extension name') @property def ver(self): return self._header.get('EXTVER', 1) @ver.setter def ver(self, value): if not _is_int(value): raise TypeError("'ver' attribute must be an integer") if 'EXTVER' in self._header: self._header['EXTVER'] = value else: self._header['EXTVER'] = (value, 'extension value') @property def level(self): return self._header.get('EXTLEVEL', 1) @level.setter def level(self, value): if not _is_int(value): raise TypeError("'level' attribute must be an integer") if 'EXTLEVEL' in self._header: self._header['EXTLEVEL'] = value else: self._header['EXTLEVEL'] = (value, 'extension level') @property def is_image(self): return ( self.name == 'PRIMARY' or ('XTENSION' in self._header and (self._header['XTENSION'] == 'IMAGE' or (self._header['XTENSION'] == 'BINTABLE' and 'ZIMAGE' in self._header and self._header['ZIMAGE'] is True)))) @property def _data_loaded(self): return ('data' in self.__dict__ and self.data is not DELAYED) @property def _has_data(self): return self._data_loaded and self.data is not None @classmethod def register_hdu(cls, hducls): cls._hdu_registry.add(hducls) @classmethod def unregister_hdu(cls, hducls): if hducls in cls._hdu_registry: cls._hdu_registry.remove(hducls) @classmethod def match_header(cls, header): raise NotImplementedError @classmethod def fromstring(cls, data, checksum=False, ignore_missing_end=False, **kwargs): """ Creates a new HDU object of the appropriate type from a string containing the HDU's entire header and, optionally, its data. Note: When creating a new HDU from a string without a backing file object, the data of that HDU may be read-only. It depends on whether the underlying string was an immutable Python str/bytes object, or some kind of read-write memory buffer such as a `memoryview`. Parameters ---------- data : str, bytearray, memoryview, ndarray A byte string containing the HDU's header and data. checksum : bool, optional Check the HDU's checksum and/or datasum. ignore_missing_end : bool, optional Ignore a missing end card in the header data. Note that without the end card the end of the header may be ambiguous and resulted in a corrupt HDU. 
In this case the assumption is that the first 2880 block that does not begin with valid FITS header data is the beginning of the data. kwargs : optional May consist of additional keyword arguments specific to an HDU type--these correspond to keywords recognized by the constructors of different HDU classes such as `PrimaryHDU`, `ImageHDU`, or `BinTableHDU`. Any unrecognized keyword arguments are simply ignored. """ return cls._readfrom_internal(data, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs) @classmethod def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False, **kwargs): """ Read the HDU from a file. Normally an HDU should be opened with :func:`open` which reads the entire HDU list in a FITS file. But this method is still provided for symmetry with :func:`writeto`. Parameters ---------- fileobj : file object or file-like object Input FITS file. The file's seek pointer is assumed to be at the beginning of the HDU. checksum : bool If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values (when present in the HDU header) match the header and data of all HDU's in the file. ignore_missing_end : bool Do not issue an exception when opening a file that is missing an ``END`` card in the last header. """ # TODO: Figure out a way to make it possible for the _File # constructor to be a noop if the argument is already a _File if not isinstance(fileobj, _File): fileobj = _File(fileobj) hdu = cls._readfrom_internal(fileobj, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs) # If the checksum had to be checked the data may have already been read # from the file, in which case we don't want to seek relative fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET) return hdu @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def writeto(self, name, output_verify='exception', overwrite=False, checksum=False): """ Write the HDU to a new file. This is a convenience method to provide a user easier output interface if only one HDU needs to be written to a file. Parameters ---------- name : file path, file object or file-like object Output FITS file. If the file object is already opened, it must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header of the HDU when written to the file. """ from .hdulist import HDUList hdulist = HDUList([self]) hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum) @classmethod def _readfrom_internal(cls, data, header=None, checksum=False, ignore_missing_end=False, **kwargs): """ Provides the bulk of the internal implementation for readfrom and fromstring. For some special cases, supports using a header that was already created, and just using the input data for the actual array data. 
""" hdu_buffer = None hdu_fileobj = None header_offset = 0 if isinstance(data, _File): if header is None: header_offset = data.tell() header = Header.fromfile(data, endcard=not ignore_missing_end) hdu_fileobj = data data_offset = data.tell() # *after* reading the header else: try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype='ubyte', buffer=data) except TypeError: raise TypeError( 'The provided object {!r} does not contain an underlying ' 'memory buffer. fromstring() requires an object that ' 'supports the buffer interface such as bytes, buffer, ' 'memoryview, ndarray, etc. This restriction is to ensure ' 'that efficient access to the array/table data is possible.' .format(data)) if header is None: def block_iter(nbytes): idx = 0 while idx < len(data): yield data[idx:idx + nbytes] idx += nbytes header_str, header = Header._from_blocks( block_iter, True, '', not ignore_missing_end, True) if len(data) > len(header_str): hdu_buffer = data elif data: hdu_buffer = data header_offset = 0 data_offset = len(header_str) # Determine the appropriate arguments to pass to the constructor from # self._kwargs. self._kwargs contains any number of optional arguments # that may or may not be valid depending on the HDU type cls = _hdu_class_from_header(cls, header) sig = signature(cls.__init__) new_kwargs = kwargs.copy() if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()): # If __init__ accepts arbitrary keyword arguments, then we can go # ahead and pass all keyword arguments; otherwise we need to delete # any that are invalid for key in kwargs: if key not in sig.parameters: del new_kwargs[key] hdu = cls(data=DELAYED, header=header, **new_kwargs) # One of these may be None, depending on whether the data came from a # file or a string buffer--later this will be further abstracted hdu._file = hdu_fileobj hdu._buffer = hdu_buffer hdu._header_offset = header_offset # beginning of the header area hdu._data_offset = data_offset # beginning of the data area # data area size, including padding size = hdu.size hdu._data_size = size + _pad_length(size) # Checksums are not checked on invalid HDU types if checksum and checksum != 'remove' and isinstance(hdu, _ValidHDU): hdu._verify_checksum_datasum() return hdu def _get_raw_data(self, shape, code, offset): """ Return raw array from either the HDU's memory buffer or underlying file. """ if isinstance(shape, int): shape = (shape,) if self._buffer: return np.ndarray(shape, dtype=code, buffer=self._buffer, offset=offset) elif self._file: return self._file.readarray(offset=offset, dtype=code, shape=shape) else: return None # TODO: Rework checksum handling so that it's not necessary to add a # checksum argument here # TODO: The BaseHDU class shouldn't even handle checksums since they're # only implemented on _ValidHDU... def _prewriteto(self, checksum=False, inplace=False): self._update_uint_scale_keywords() # Handle checksum self._update_checksum(checksum) def _update_uint_scale_keywords(self): """ If the data is unsigned int 16, 32, or 64 add BSCALE/BZERO cards to header. """ if (self._has_data and self._standard and _is_pseudo_unsigned(self.data.dtype)): # CompImageHDUs need TFIELDS immediately after GCOUNT, # so BSCALE has to go after TFIELDS if it exists. 
if 'TFIELDS' in self._header: self._header.set('BSCALE', 1, after='TFIELDS') elif 'GCOUNT' in self._header: self._header.set('BSCALE', 1, after='GCOUNT') else: self._header.set('BSCALE', 1) self._header.set('BZERO', _unsigned_zero(self.data.dtype), after='BSCALE') def _update_checksum(self, checksum, checksum_keyword='CHECKSUM', datasum_keyword='DATASUM'): """Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or keywords with equivalent semantics given by the ``checksum_keyword`` and ``datasum_keyword`` arguments--see for example ``CompImageHDU`` for an example of why this might need to be overridden). """ # If the data is loaded it isn't necessarily 'modified', but we have no # way of knowing for sure modified = self._header._modified or self._data_loaded if checksum == 'remove': if checksum_keyword in self._header: del self._header[checksum_keyword] if datasum_keyword in self._header: del self._header[datasum_keyword] elif (modified or self._new or (checksum and ('CHECKSUM' not in self._header or 'DATASUM' not in self._header or not self._checksum_valid or not self._datasum_valid))): if checksum == 'datasum': self.add_datasum(datasum_keyword=datasum_keyword) elif checksum: self.add_checksum(checksum_keyword=checksum_keyword, datasum_keyword=datasum_keyword) def _postwriteto(self): # If data is unsigned integer 16, 32 or 64, remove the # BSCALE/BZERO cards if (self._has_data and self._standard and _is_pseudo_unsigned(self.data.dtype)): for keyword in ('BSCALE', 'BZERO'): with suppress(KeyError): del self._header[keyword] def _writeheader(self, fileobj): offset = 0 if not fileobj.simulateonly: with suppress(AttributeError, OSError): offset = fileobj.tell() self._header.tofile(fileobj) try: size = fileobj.tell() - offset except (AttributeError, OSError): size = len(str(self._header)) else: size = len(str(self._header)) return offset, size def _writedata(self, fileobj): # TODO: A lot of the simulateonly stuff should be moved back into the # _File class--basically it should turn write and flush into a noop offset = 0 size = 0 if not fileobj.simulateonly: fileobj.flush() try: offset = fileobj.tell() except OSError: offset = 0 if self._data_loaded or self._data_needs_rescale: if self.data is not None: size += self._writedata_internal(fileobj) # pad the FITS data block if size > 0: padding = _pad_length(size) * self._padding_byte # TODO: Not that this is ever likely, but if for some odd # reason _padding_byte is > 0x80 this will fail; but really if # somebody's custom fits format is doing that, they're doing it # wrong and should be reprimanded harshly. fileobj.write(padding.encode('ascii')) size += len(padding) else: # The data has not been modified or does not need need to be # rescaled, so it can be copied, unmodified, directly from an # existing file or buffer size += self._writedata_direct_copy(fileobj) # flush, to make sure the content is written if not fileobj.simulateonly: fileobj.flush() # return both the location and the size of the data area return offset, size def _writedata_internal(self, fileobj): """ The beginning and end of most _writedata() implementations are the same, but the details of writing the data array itself can vary between HDU types, so that should be implemented in this method. Should return the size in bytes of the data written. """ if not fileobj.simulateonly: fileobj.writearray(self.data) return self.data.size * self.data.itemsize def _writedata_direct_copy(self, fileobj): """Copies the data directly from one file/buffer to the new file. 
For now this is handled by loading the raw data from the existing data (including any padding) via a memory map or from an already in-memory buffer and using Numpy's existing file-writing facilities to write to the new file. If this proves too slow a more direct approach may be used. """ raw = self._get_raw_data(self._data_size, 'ubyte', self._data_offset) if raw is not None: fileobj.writearray(raw) return raw.nbytes else: return 0 # TODO: This is the start of moving HDU writing out of the _File class; # Though right now this is an internal private method (though still used by # HDUList, eventually the plan is to have this be moved into writeto() # somehow... def _writeto(self, fileobj, inplace=False, copy=False): try: dirname = os.path.dirname(fileobj._file.name) except AttributeError: dirname = None with _free_space_check(self, dirname): self._writeto_internal(fileobj, inplace, copy) def _writeto_internal(self, fileobj, inplace, copy): # For now fileobj is assumed to be a _File object if not inplace or self._new: header_offset, _ = self._writeheader(fileobj) data_offset, data_size = self._writedata(fileobj) # Set the various data location attributes on newly-written HDUs if self._new: self._header_offset = header_offset self._data_offset = data_offset self._data_size = data_size return hdrloc = self._header_offset hdrsize = self._data_offset - self._header_offset datloc = self._data_offset datsize = self._data_size if self._header._modified: # Seek to the original header location in the file self._file.seek(hdrloc) # This should update hdrloc with he header location in the new file hdrloc, hdrsize = self._writeheader(fileobj) # If the data is to be written below with self._writedata, that # will also properly update the data location; but it should be # updated here too datloc = hdrloc + hdrsize elif copy: # Seek to the original header location in the file self._file.seek(hdrloc) # Before writing, update the hdrloc with the current file position, # which is the hdrloc for the new file hdrloc = fileobj.tell() fileobj.write(self._file.read(hdrsize)) # The header size is unchanged, but the data location may be # different from before depending on if previous HDUs were resized datloc = fileobj.tell() if self._data_loaded: if self.data is not None: # Seek through the array's bases for an memmap'd array; we # can't rely on the _File object to give us this info since # the user may have replaced the previous mmap'd array if copy or self._data_replaced: # Of course, if we're copying the data to a new file # we don't care about flushing the original mmap; # instead just read it into the new file array_mmap = None else: array_mmap = _get_array_mmap(self.data) if array_mmap is not None: array_mmap.flush() else: self._file.seek(self._data_offset) datloc, datsize = self._writedata(fileobj) elif copy: datsize = self._writedata_direct_copy(fileobj) self._header_offset = hdrloc self._data_offset = datloc self._data_size = datsize self._data_replaced = False def _close(self, closed=True): # If the data was mmap'd, close the underlying mmap (this will # prevent any future access to the .data attribute if there are # not other references to it; if there are other references then # it is up to the user to clean those up if (closed and self._data_loaded and _get_array_mmap(self.data) is not None): del self.data # For backwards-compatibility, though nobody should have # been using this directly: _AllHDU = _BaseHDU # For convenience... 
# TODO: register_hdu could be made into a class decorator which would be pretty # cool, but only once 2.6 support is dropped. register_hdu = _BaseHDU.register_hdu unregister_hdu = _BaseHDU.unregister_hdu class _CorruptedHDU(_BaseHDU): """ A Corrupted HDU class. This class is used when one or more mandatory `Card`s are corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or ``END`` cards. A corrupted HDU usually means that the data size cannot be calculated or the ``END`` card is not found. In the case of a missing ``END`` card, the `Header` may also contain the binary data .. note:: In future, it may be possible to decipher where the last block of the `Header` ends, but this task may be difficult when the extension is a `TableHDU` containing ASCII data. """ @property def size(self): """ Returns the size (in bytes) of the HDU's data part. """ # Note: On compressed files this might report a negative size; but the # file is corrupt anyways so I'm not too worried about it. if self._buffer is not None: return len(self._buffer) - self._data_offset return self._file.size - self._data_offset def _summary(self): return (self.name, self.ver, 'CorruptedHDU') def verify(self): pass class _NonstandardHDU(_BaseHDU, _Verify): """ A Non-standard HDU class. This class is used for a Primary HDU when the ``SIMPLE`` Card has a value of `False`. A non-standard HDU comes from a file that resembles a FITS file but departs from the standards in some significant way. One example would be files where the numbers are in the DEC VAX internal storage format rather than the standard FITS most significant byte first. The header for this HDU should be valid. The data for this HDU is read from the file as a byte stream that begins at the first byte after the header ``END`` card and continues until the end of the file. """ _standard = False @classmethod def match_header(cls, header): """ Matches any HDU that has the 'SIMPLE' keyword but is not a standard Primary or Groups HDU. """ # The SIMPLE keyword must be in the first card card = header.cards[0] # The check that 'GROUPS' is missing is a bit redundant, since the # match_header for GroupsHDU will always be called before this one. if card.keyword == 'SIMPLE': if 'GROUPS' not in header and card.value is False: return True else: raise InvalidHDUException else: return False @property def size(self): """ Returns the size (in bytes) of the HDU's data part. """ if self._buffer is not None: return len(self._buffer) - self._data_offset return self._file.size - self._data_offset def _writedata(self, fileobj): """ Differs from the base class :class:`_writedata` in that it doesn't automatically add padding, and treats the data as a string of raw bytes instead of an array. """ offset = 0 size = 0 if not fileobj.simulateonly: fileobj.flush() try: offset = fileobj.tell() except OSError: offset = 0 if self.data is not None: if not fileobj.simulateonly: fileobj.write(self.data) # flush, to make sure the content is written fileobj.flush() size = len(self.data) # return both the location and the size of the data area return offset, size def _summary(self): return (self.name, self.ver, 'NonstandardHDU', len(self._header)) @lazyproperty def data(self): """ Return the file data. 
""" return self._get_raw_data(self.size, 'ubyte', self._data_offset) def _verify(self, option='warn'): errs = _ErrList([], unit='Card') # verify each card for card in self._header.cards: errs.append(card._verify(option)) return errs class _ValidHDU(_BaseHDU, _Verify): """ Base class for all HDUs which are not corrupted. """ def __init__(self, data=None, header=None, name=None, ver=None, **kwargs): super().__init__(data=data, header=header) # NOTE: private data members _checksum and _datasum are used by the # utility script "fitscheck" to detect missing checksums. self._checksum = None self._checksum_valid = None self._datasum = None self._datasum_valid = None if name is not None: self.name = name if ver is not None: self.ver = ver @classmethod def match_header(cls, header): """ Matches any HDU that is not recognized as having either the SIMPLE or XTENSION keyword in its header's first card, but is nonetheless not corrupted. TODO: Maybe it would make more sense to use _NonstandardHDU in this case? Not sure... """ return first(header.keys()) not in ('SIMPLE', 'XTENSION') @property def size(self): """ Size (in bytes) of the data portion of the HDU. """ size = 0 naxis = self._header.get('NAXIS', 0) if naxis > 0: size = 1 for idx in range(naxis): size = size * self._header['NAXIS' + str(idx + 1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount + size) // 8 return size def filebytes(self): """ Calculates and returns the number of bytes that this HDU will write to a file. """ f = _File() # TODO: Fix this once new HDU writing API is settled on return self._writeheader(f)[1] + self._writedata(f)[1] def fileinfo(self): """ Returns a dictionary detailing information about the locations of this HDU within any associated file. The values are only valid after a read or write of the associated file with no intervening changes to the `HDUList`. Returns ------- dict or None The dictionary details information about the locations of this HDU within an associated file. Returns `None` when the HDU is not associated with a file. Dictionary contents: ========== ================================================ Key Value ========== ================================================ file File object associated with the HDU filemode Mode in which the file was opened (readonly, copyonwrite, update, append, ostream) hdrLoc Starting byte location of header in file datLoc Starting byte location of data block in file datSpan Data size including padding ========== ================================================ """ if hasattr(self, '_file') and self._file: return {'file': self._file, 'filemode': self._file.mode, 'hdrLoc': self._header_offset, 'datLoc': self._data_offset, 'datSpan': self._data_size} else: return None def copy(self): """ Make a copy of the HDU, both header and data are copied. """ if self.data is not None: data = self.data.copy() else: data = None return self.__class__(data=data, header=self._header.copy()) def _verify(self, option='warn'): errs = _ErrList([], unit='Card') is_valid = BITPIX2DTYPE.__contains__ # Verify location and value of mandatory keywords. # Do the first card here, instead of in the respective HDU classes, so # the checking is in order, in case of required cards in wrong order. 
if isinstance(self, ExtensionHDU): firstkey = 'XTENSION' firstval = self._extension else: firstkey = 'SIMPLE' firstval = True self.req_cards(firstkey, 0, None, firstval, option, errs) self.req_cards('BITPIX', 1, lambda v: (_is_int(v) and is_valid(v)), 8, option, errs) self.req_cards('NAXIS', 2, lambda v: (_is_int(v) and 0 <= v <= 999), 0, option, errs) naxis = self._header.get('NAXIS', 0) if naxis < 1000: for ax in range(3, naxis + 3): key = 'NAXIS' + str(ax - 2) self.req_cards(key, ax, lambda v: (_is_int(v) and v >= 0), _extract_number(self._header[key], default=1), option, errs) # Remove NAXISj cards where j is not in range 1, naxis inclusive. for keyword in self._header: if keyword.startswith('NAXIS') and len(keyword) > 5: try: number = int(keyword[5:]) if number <= 0 or number > naxis: raise ValueError except ValueError: err_text = ("NAXISj keyword out of range ('{}' when " "NAXIS == {})".format(keyword, naxis)) def fix(self=self, keyword=keyword): del self._header[keyword] errs.append( self.run_option(option=option, err_text=err_text, fix=fix, fix_text="Deleted.")) # Verify that the EXTNAME keyword exists and is a string if 'EXTNAME' in self._header: if not isinstance(self._header['EXTNAME'], str): err_text = 'The EXTNAME keyword must have a string value.' fix_text = 'Converted the EXTNAME keyword to a string value.' def fix(header=self._header): header['EXTNAME'] = str(header['EXTNAME']) errs.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # verify each card for card in self._header.cards: errs.append(card._verify(option)) return errs # TODO: Improve this API a little bit--for one, most of these arguments # could be optional def req_cards(self, keyword, pos, test, fix_value, option, errlist): """ Check the existence, location, and value of a required `Card`. Parameters ---------- keyword : str The keyword to validate pos : int, callable If an ``int``, this specifies the exact location this card should have in the header. Remember that Python is zero-indexed, so this means ``pos=0`` requires the card to be the first card in the header. If given a callable, it should take one argument--the actual position of the keyword--and return `True` or `False`. This can be used for custom evaluation. For example if ``pos=lambda idx: idx > 10`` this will check that the keyword's index is greater than 10. test : callable This should be a callable (generally a function) that is passed the value of the given keyword and returns `True` or `False`. This can be used to validate the value associated with the given keyword. fix_value : str, int, float, complex, bool, None A valid value for a FITS keyword to to use if the given ``test`` fails to replace an invalid value. In other words, this provides a default value to use as a replacement if the keyword's current value is invalid. If `None`, there is no replacement value and the keyword is unfixable. option : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. errlist : list A list of validation errors already found in the FITS file; this is used primarily for the validation system to collect errors across multiple HDUs and multiple calls to `req_cards`. Notes ----- If ``pos=None``, the card can be anywhere in the header. 
If the card does not exist, the new card will have the ``fix_value`` as its value when created. Also check the card's value by using the ``test`` argument. """ errs = errlist fix = None try: index = self._header.index(keyword) except ValueError: index = None fixable = fix_value is not None insert_pos = len(self._header) + 1 # If pos is an int, insert at the given position (and convert it to a # lambda) if _is_int(pos): insert_pos = pos pos = lambda x: x == insert_pos # if the card does not exist if index is None: err_text = "'{}' card does not exist.".format(keyword) fix_text = "Fixed by inserting a new '{}' card.".format(keyword) if fixable: # use repr to accommodate both string and non-string types # Boolean is also OK in this constructor card = (keyword, fix_value) def fix(self=self, insert_pos=insert_pos, card=card): self._header.insert(insert_pos, card) errs.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) else: # if the supposed location is specified if pos is not None: if not pos(index): err_text = ("'{}' card at the wrong place " "(card {}).".format(keyword, index)) fix_text = ("Fixed by moving it to the right place " "(card {}).".format(insert_pos)) def fix(self=self, index=index, insert_pos=insert_pos): card = self._header.cards[index] del self._header[index] self._header.insert(insert_pos, card) errs.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value checking is specified if test: val = self._header[keyword] if not test(val): err_text = ("'{}' card has invalid value '{}'.".format( keyword, val)) fix_text = ("Fixed by setting a new value '{}'.".format( fix_value)) if fixable: def fix(self=self, keyword=keyword, val=fix_value): self._header[keyword] = fix_value errs.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return errs def add_datasum(self, when=None, datasum_keyword='DATASUM'): """ Add the ``DATASUM`` card to this HDU with the value set to the checksum calculated for the data. Parameters ---------- when : str, optional Comment string for the card that by default represents the time when the checksum was calculated datasum_keyword : str, optional The name of the header keyword to store the datasum value in; this is typically 'DATASUM' per convention, but there exist use cases in which a different keyword should be used Returns ------- checksum : int The calculated datasum Notes ----- For testing purposes, provide a ``when`` argument to enable the comment value in the card to remain consistent. This will enable the generation of a ``CHECKSUM`` card with a consistent value. """ cs = self._calculate_datasum() if when is None: when = 'data unit checksum updated {}'.format(self._get_timestamp()) self._header[datasum_keyword] = (str(cs), when) return cs def add_checksum(self, when=None, override_datasum=False, checksum_keyword='CHECKSUM', datasum_keyword='DATASUM'): """ Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with the values set to the checksum calculated for the HDU and the data respectively. The addition of the ``DATASUM`` card may be overridden. 
Parameters ---------- when : str, optional comment string for the cards; by default the comments will represent the time when the checksum was calculated override_datasum : bool, optional add the ``CHECKSUM`` card only checksum_keyword : str, optional The name of the header keyword to store the checksum value in; this is typically 'CHECKSUM' per convention, but there exist use cases in which a different keyword should be used datasum_keyword : str, optional See ``checksum_keyword`` Notes ----- For testing purposes, first call `add_datasum` with a ``when`` argument, then call `add_checksum` with a ``when`` argument and ``override_datasum`` set to `True`. This will provide consistent comments for both cards and enable the generation of a ``CHECKSUM`` card with a consistent value. """ if not override_datasum: # Calculate and add the data checksum to the header. data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword) else: # Just calculate the data checksum data_cs = self._calculate_datasum() if when is None: when = 'HDU checksum updated {}'.format(self._get_timestamp()) # Add the CHECKSUM card to the header with a value of all zeros. if datasum_keyword in self._header: self._header.set(checksum_keyword, '0' * 16, when, before=datasum_keyword) else: self._header.set(checksum_keyword, '0' * 16, when) csum = self._calculate_checksum(data_cs, checksum_keyword=checksum_keyword) self._header[checksum_keyword] = csum def verify_datasum(self): """ Verify that the value in the ``DATASUM`` keyword matches the value calculated for the ``DATASUM`` of the current HDU data. Returns ------- valid : int - 0 - failure - 1 - success - 2 - no ``DATASUM`` keyword present """ if 'DATASUM' in self._header: datasum = self._calculate_datasum() if datasum == int(self._header['DATASUM']): return 1 else: # Failed return 0 else: return 2 def verify_checksum(self): """ Verify that the value in the ``CHECKSUM`` keyword matches the value calculated for the current HDU CHECKSUM. Returns ------- valid : int - 0 - failure - 1 - success - 2 - no ``CHECKSUM`` keyword present """ if 'CHECKSUM' in self._header: if 'DATASUM' in self._header: datasum = self._calculate_datasum() else: datasum = 0 checksum = self._calculate_checksum(datasum) if checksum == self._header['CHECKSUM']: return 1 else: # Failed return 0 else: return 2 def _verify_checksum_datasum(self): """ Verify the checksum/datasum values if the cards exist in the header. Simply displays warnings if either the checksum or datasum don't match. """ if 'CHECKSUM' in self._header: self._checksum = self._header['CHECKSUM'] self._checksum_valid = self.verify_checksum() if not self._checksum_valid: warnings.warn( 'Checksum verification failed for HDU {0}.\n'.format( (self.name, self.ver)), AstropyUserWarning) if 'DATASUM' in self._header: self._datasum = self._header['DATASUM'] self._datasum_valid = self.verify_datasum() if not self._datasum_valid: warnings.warn( 'Datasum verification failed for HDU {0}.\n'.format( (self.name, self.ver)), AstropyUserWarning) def _get_timestamp(self): """ Return the current timestamp in ISO 8601 format, with microseconds stripped off. Ex.: 2007-05-30T19:05:11 """ return datetime.datetime.now().isoformat()[:19] def _calculate_datasum(self): """ Calculate the value for the ``DATASUM`` card in the HDU. """ if not self._data_loaded: # This is the case where the data has not been read from the file # yet. We find the data in the file, read it, and calculate the # datasum. 
if self.size > 0: raw_data = self._get_raw_data(self._data_size, 'ubyte', self._data_offset) return self._compute_checksum(raw_data) else: return 0 elif self.data is not None: return self._compute_checksum(self.data.view('ubyte')) else: return 0 def _calculate_checksum(self, datasum, checksum_keyword='CHECKSUM'): """ Calculate the value of the ``CHECKSUM`` card in the HDU. """ old_checksum = self._header[checksum_keyword] self._header[checksum_keyword] = '0' * 16 # Convert the header to bytes. s = self._header.tostring().encode('utf8') # Calculate the checksum of the Header and data. cs = self._compute_checksum(np.frombuffer(s, dtype='ubyte'), datasum) # Encode the checksum into a string. s = self._char_encode(~cs) # Return the header card value. self._header[checksum_keyword] = old_checksum return s def _compute_checksum(self, data, sum32=0): """ Compute the ones-complement checksum of a sequence of bytes. Parameters ---------- data a memory region to checksum sum32 incremental checksum value from another region Returns ------- ones complement checksum """ blocklen = 2880 sum32 = np.uint32(sum32) for i in range(0, len(data), blocklen): length = min(blocklen, len(data) - i) # ???? sum32 = self._compute_hdu_checksum(data[i:i + length], sum32) return sum32 def _compute_hdu_checksum(self, data, sum32=0): """ Translated from FITS Checksum Proposal by Seaman, Pence, and Rots. Use uint32 literals as a hedge against type promotion to int64. This code should only be called with blocks of 2880 bytes Longer blocks result in non-standard checksums with carry overflow Historically, this code *was* called with larger blocks and for that reason still needs to be for backward compatibility. """ u8 = np.uint32(8) u16 = np.uint32(16) uFFFF = np.uint32(0xFFFF) if data.nbytes % 2: last = data[-1] data = data[:-1] else: last = np.uint32(0) data = data.view('>u2') hi = sum32 >> u16 lo = sum32 & uFFFF hi += np.add.reduce(data[0::2], dtype=np.uint64) lo += np.add.reduce(data[1::2], dtype=np.uint64) if (data.nbytes // 2) % 2: lo += last << u8 else: hi += last << u8 hicarry = hi >> u16 locarry = lo >> u16 while hicarry or locarry: hi = (hi & uFFFF) + locarry lo = (lo & uFFFF) + hicarry hicarry = hi >> u16 locarry = lo >> u16 return (hi << u16) + lo # _MASK and _EXCLUDE used for encoding the checksum value into a character # string. _MASK = [0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF] _EXCLUDE = [0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60] def _encode_byte(self, byte): """ Encode a single byte. """ quotient = byte // 4 + ord('0') remainder = byte % 4 ch = np.array( [(quotient + remainder), quotient, quotient, quotient], dtype='int32') check = True while check: check = False for x in self._EXCLUDE: for j in [0, 2]: if ch[j] == x or ch[j + 1] == x: ch[j] += 1 ch[j + 1] -= 1 check = True return ch def _char_encode(self, value): """ Encodes the checksum ``value`` using the algorithm described in SPR section A.7.2 and returns it as a 16 character string. Parameters ---------- value a checksum Returns ------- ascii encoded checksum """ value = np.uint32(value) asc = np.zeros((16,), dtype='byte') ascii = np.zeros((16,), dtype='byte') for i in range(4): byte = (value & self._MASK[i]) >> ((3 - i) * 8) ch = self._encode_byte(byte) for j in range(4): asc[4 * j + i] = ch[j] for i in range(16): ascii[i] = asc[(i + 15) % 16] return decode_ascii(ascii.tostring()) class ExtensionHDU(_ValidHDU): """ An extension HDU class. 
This class is the base class for the `TableHDU`, `ImageHDU`, and `BinTableHDU` classes. """ _extension = '' @classmethod def match_header(cls, header): """ This class should never be instantiated directly. Either a standard extension HDU type should be used for a specific extension, or NonstandardExtHDU should be used. """ raise NotImplementedError @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def writeto(self, name, output_verify='exception', overwrite=False, checksum=False): """ Works similarly to the normal writeto(), but prepends a default `PrimaryHDU` are required by extension HDUs (which cannot stand on their own). .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. """ from .hdulist import HDUList from .image import PrimaryHDU hdulist = HDUList([PrimaryHDU(), self]) hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum) def _verify(self, option='warn'): errs = super()._verify(option=option) # Verify location and value of mandatory keywords. naxis = self._header.get('NAXIS', 0) self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v >= 0), 0, option, errs) self.req_cards('GCOUNT', naxis + 4, lambda v: (_is_int(v) and v == 1), 1, option, errs) return errs # For backwards compatibility, though this needs to be deprecated # TODO: Mark this as deprecated _ExtensionHDU = ExtensionHDU class NonstandardExtHDU(ExtensionHDU): """ A Non-standard Extension HDU class. This class is used for an Extension HDU when the ``XTENSION`` `Card` has a non-standard value. In this case, Astropy can figure out how big the data is but not what it is. The data for this HDU is read from the file as a byte stream that begins at the first byte after the header ``END`` card and continues until the beginning of the next header or the end of the file. """ _standard = False @classmethod def match_header(cls, header): """ Matches any extension HDU that is not one of the standard extension HDU types. """ card = header.cards[0] xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() # A3DTABLE is not really considered a 'standard' extension, as it was # sort of the prototype for BINTABLE; however, since our BINTABLE # implementation handles A3DTABLE HDUs it is listed here. standard_xtensions = ('IMAGE', 'TABLE', 'BINTABLE', 'A3DTABLE') # The check that xtension is not one of the standard types should be # redundant. return (card.keyword == 'XTENSION' and xtension not in standard_xtensions) def _summary(self): return (self.name, self.ver, 'NonstandardExtHDU', len(self._header)) @lazyproperty def data(self): """ Return the file data. """ return self._get_raw_data(self.size, 'ubyte', self._data_offset) # TODO: Mark this as deprecated _NonstandardExtHDU = NonstandardExtHDU
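# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the carry-folding step
# of the FITS ones-complement checksum used by _compute_hdu_checksum. Bytes
# are read as big-endian 16-bit words, alternating words accumulate in two
# running sums, and the carries above bit 16 are repeatedly folded back in.
# The function name is hypothetical; real blocks should be 2880 bytes (even
# length is assumed here, so the odd-trailing-byte case is not handled).
import numpy as np


def _fold_ones_complement(block, sum32=0):
    """Fold a byte block into a running 32-bit ones-complement-style sum."""
    words = np.frombuffer(bytes(block), dtype='>u2')
    hi = (int(sum32) >> 16) + int(words[0::2].sum(dtype=np.uint64))
    lo = (int(sum32) & 0xFFFF) + int(words[1::2].sum(dtype=np.uint64))
    # Swap the carries between the two accumulators until none remain.
    while (hi >> 16) or (lo >> 16):
        hi, lo = (hi & 0xFFFF) + (lo >> 16), (lo & 0xFFFF) + (hi >> 16)
    return (hi << 16) + lo


# A block of zero bytes leaves the running sum unchanged:
assert _fold_ones_complement(b'\x00' * 2880, 123) == 123
# ---------------------------------------------------------------------------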
# Licensed under a 3-clause BSD style license - see PYFITS.rst import bz2 import gzip import itertools import os import shutil import sys import warnings import numpy as np from . import compressed from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU from .groups import GroupsHDU from .image import PrimaryHDU, ImageHDU from ..file import _File from ..header import _pad_length from ..util import (_is_int, _tmp_name, fileobj_closed, ignore_sigint, _get_array_mmap, _free_space_check) from ..verify import _Verify, _ErrList, VerifyError, VerifyWarning from ....utils import indent from ....utils.exceptions import AstropyUserWarning from ....utils.decorators import deprecated_renamed_argument def fitsopen(name, mode='readonly', memmap=None, save_backup=False, cache=True, lazy_load_hdus=None, **kwargs): """Factory function to open a FITS file and return an `HDUList` object. Parameters ---------- name : file path, file object, file-like object or pathlib.Path object File to be opened. mode : str, optional Open mode, 'readonly' (default), 'update', 'append', 'denywrite', or 'ostream'. If ``name`` is a file object that is already opened, ``mode`` must match the mode the file was opened with, readonly (rb), update (rb+), append (ab+), ostream (w), denywrite (rb)). memmap : bool, optional Is memory mapping to be used? save_backup : bool, optional If the file was opened in update or append mode, this ensures that a backup of the original file is saved before any changes are flushed. The backup has the same name as the original file with ".bak" appended. If "file.bak" already exists then "file.bak.1" is used, and so on. cache : bool, optional If the file name is a URL, `~astropy.utils.data.download_file` is used to open the file. This specifies whether or not to save the file locally in Astropy's download cache (default: `True`). lazy_load_hdus : bool, option By default `~astropy.io.fits.open` will not read all the HDUs and headers in a FITS file immediately upon opening. This is an optimization especially useful for large files, as FITS has no way of determining the number and offsets of all the HDUs in a file without scanning through the file and reading all the headers. To disable lazy loading and read all HDUs immediately (the old behavior) use ``lazy_load_hdus=False``. This can lead to fewer surprises--for example with lazy loading enabled, ``len(hdul)`` can be slow, as it means the entire FITS file needs to be read in order to determine the number of HDUs. ``lazy_load_hdus=False`` ensures that all HDUs have already been loaded after the file has been opened. .. versionadded:: 1.3 kwargs : dict, optional additional optional keyword arguments, possible values are: - **uint** : bool Interpret signed integer data where ``BZERO`` is the central value and ``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as ``uint16`` data. This is enabled by default so that the pseudo-unsigned integer convention is assumed. Note, for backward compatibility, the kwarg **uint16** may be used instead. The kwarg was renamed when support was added for integers of any size. - **ignore_missing_end** : bool Do not issue an exception when opening a file that is missing an ``END`` card in the last header. - **checksum** : bool, str If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values (when present in the HDU header) match the header and data of all HDU's in the file. 
Updates to a file that already has a checksum will preserve and update the existing checksums unless this argument is given a value of 'remove', in which case the CHECKSUM and DATASUM values are not checked, and are removed when saving changes to the file. - **disable_image_compression** : bool If `True`, treats compressed image HDU's like normal binary table HDU's. - **do_not_scale_image_data** : bool If `True`, image data is not scaled using BSCALE/BZERO values when read. - **character_as_bytes** : bool Whether to return bytes for string columns. By default this is `False` and (unicode) strings are returned, but this does not respect memory mapping and loads the whole column in memory when accessed. - **ignore_blank** : bool If `True`, the BLANK keyword is ignored if present. - **scale_back** : bool If `True`, when saving changes to a file that contained scaled image data, restore the data to the original type and reapply the original BSCALE/BZERO values. This could lead to loss of accuracy if scaling back to integer values after performing floating point operations on the data. Returns ------- hdulist : an `HDUList` object `HDUList` containing all of the header data units in the file. """ from .. import conf if memmap is None: # distinguish between True (kwarg explicitly set) # and None (preference for memmap in config, might be ignored) memmap = None if conf.use_memmap else False else: memmap = bool(memmap) if lazy_load_hdus is None: lazy_load_hdus = conf.lazy_load_hdus else: lazy_load_hdus = bool(lazy_load_hdus) if 'uint' not in kwargs: kwargs['uint'] = conf.enable_uint if not name: raise ValueError('Empty filename: {!r}'.format(name)) return HDUList.fromfile(name, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs) class HDUList(list, _Verify): """ HDU list class. This is the top-level FITS object. When a FITS file is opened, a `HDUList` object is returned. """ def __init__(self, hdus=[], file=None): """ Construct a `HDUList` object. Parameters ---------- hdus : sequence of HDU objects or single HDU, optional The HDU object(s) to comprise the `HDUList`. Should be instances of HDU classes like `ImageHDU` or `BinTableHDU`. file : file object, bytes, optional The opened physical file associated with the `HDUList` or a bytes object containing the contents of the FITS file. """ if isinstance(file, bytes): self._data = file self._file = None else: self._file = file self._data = None self._save_backup = False # For internal use only--the keyword args passed to fitsopen / # HDUList.fromfile/string when opening the file self._open_kwargs = {} self._in_read_next_hdu = False # If we have read all the HDUs from the file or not # The assumes that all HDUs have been written when we first opened the # file; we do not currently support loading additional HDUs from a file # while it is being streamed to. In the future that might be supported # but for now this is only used for the purpose of lazy-loading of # existing HDUs. 
if file is None: self._read_all = True elif self._file is not None: # Should never attempt to read HDUs in ostream mode self._read_all = self._file.mode == 'ostream' else: self._read_all = False if hdus is None: hdus = [] # can take one HDU, as well as a list of HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList, list)): raise TypeError("Invalid input for HDUList.") for idx, hdu in enumerate(hdus): if not isinstance(hdu, _BaseHDU): raise TypeError("Element {} in the HDUList input is " "not an HDU.".format(idx)) super().__init__(hdus) if file is None: # Only do this when initializing from an existing list of HDUs # When initalizing from a file, this will be handled by the # append method after the first HDU is read self.update_extend() def __len__(self): if not self._in_read_next_hdu: while self._read_next_hdu(): pass return super().__len__() def __repr__(self): # In order to correctly repr an HDUList we need to load all the # HDUs as well while self._read_next_hdu(): pass return super().__repr__() def __iter__(self): # While effectively this does the same as: # for idx in range(len(self)): # yield self[idx] # the more complicated structure is here to prevent the use of len(), # which would break the lazy loading for idx in itertools.count(): try: yield self[idx] except IndexError: break def __getitem__(self, key): """ Get an HDU from the `HDUList`, indexed by number or name. """ # If the key is a slice we need to make sure the necessary HDUs # have been loaded before passing the slice on to super. if isinstance(key, slice): max_idx = key.stop # Check for and handle the case when no maximum was # specified (e.g. [1:]). if max_idx is None: # We need all of the HDUs, so load them # and reset the maximum to the actual length. max_idx = len(self) # Just in case the max_idx is negative... max_idx = self._positive_index_of(max_idx) number_loaded = super().__len__() if max_idx >= number_loaded: # We need more than we have, try loading up to and including # max_idx. Note we do not try to be clever about skipping HDUs # even though key.step might conceivably allow it. for i in range(number_loaded, max_idx): # Read until max_idx or to the end of the file, whichever # comes first. if not self._read_next_hdu(): break try: hdus = super().__getitem__(key) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError('HDU not found, possibly because the index ' 'is out of range, or because the file was ' 'closed before all HDUs were read') else: return HDUList(hdus) # Originally this used recursion, but hypothetically an HDU with # a very large number of HDUs could blow the stack, so use a loop # instead try: return self._try_while_unread_hdus(super().__getitem__, self._positive_index_of(key)) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError('HDU not found, possibly because the index ' 'is out of range, or because the file was ' 'closed before all HDUs were read') def __contains__(self, item): """ Returns `True` if ``HDUList.index_of(item)`` succeeds. """ try: self._try_while_unread_hdus(self.index_of, item) except KeyError: return False return True def __setitem__(self, key, hdu): """ Set an HDU to the `HDUList`, indexed by number or name. 
""" _key = self._positive_index_of(key) if isinstance(hdu, (slice, list)): if _is_int(_key): raise ValueError('An element in the HDUList must be an HDU.') for item in hdu: if not isinstance(item, _BaseHDU): raise ValueError('{} is not an HDU.'.format(item)) else: if not isinstance(hdu, _BaseHDU): raise ValueError('{} is not an HDU.'.format(hdu)) try: self._try_while_unread_hdus(super().__setitem__, _key, hdu) except IndexError: raise IndexError('Extension {} is out of bound or not found.' .format(key)) self._resize = True self._truncate = False def __delitem__(self, key): """ Delete an HDU from the `HDUList`, indexed by number or name. """ if isinstance(key, slice): end_index = len(self) else: key = self._positive_index_of(key) end_index = len(self) - 1 self._try_while_unread_hdus(super().__delitem__, key) if (key == end_index or key == -1 and not self._resize): self._truncate = True else: self._truncate = False self._resize = True # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() @classmethod def fromfile(cls, fileobj, mode=None, memmap=None, save_backup=False, cache=True, lazy_load_hdus=True, **kwargs): """ Creates an `HDUList` instance from a file-like object. The actual implementation of ``fitsopen()``, and generally shouldn't be used directly. Use :func:`open` instead (and see its documentation for details of the parameters accepted by this method). """ return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap, save_backup=save_backup, cache=cache, lazy_load_hdus=lazy_load_hdus, **kwargs) @classmethod def fromstring(cls, data, **kwargs): """ Creates an `HDUList` instance from a string or other in-memory data buffer containing an entire FITS file. Similar to :meth:`HDUList.fromfile`, but does not accept the mode or memmap arguments, as they are only relevant to reading from a file on disk. This is useful for interfacing with other libraries such as CFITSIO, and may also be useful for streaming applications. Parameters ---------- data : str, buffer, memoryview, etc. A string or other memory buffer containing an entire FITS file. It should be noted that if that memory is read-only (such as a Python string) the returned :class:`HDUList`'s data portions will also be read-only. kwargs : dict Optional keyword arguments. See :func:`astropy.io.fits.open` for details. Returns ------- hdul : HDUList An :class:`HDUList` object representing the in-memory FITS file. """ try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype='ubyte', buffer=data) except TypeError: raise TypeError( 'The provided object {} does not contain an underlying ' 'memory buffer. fromstring() requires an object that ' 'supports the buffer interface such as bytes, buffer, ' 'memoryview, ndarray, etc. This restriction is to ensure ' 'that efficient access to the array/table data is possible.' ''.format(data)) return cls._readfrom(data=data, **kwargs) def fileinfo(self, index): """ Returns a dictionary detailing information about the locations of the indexed HDU within any associated file. The values are only valid after a read or write of the associated file with no intervening changes to the `HDUList`. Parameters ---------- index : int Index of HDU for which info is to be returned. Returns ------- fileinfo : dict or None The dictionary details information about the locations of the indexed HDU within an associated file. 
Returns `None` when the HDU is not associated with a file. Dictionary contents: ========== ======================================================== Key Value ========== ======================================================== file File object associated with the HDU filename Name of associated file object filemode Mode in which the file was opened (readonly, update, append, denywrite, ostream) resized Flag that when `True` indicates that the data has been resized since the last read/write so the returned values may not be valid. hdrLoc Starting byte location of header in file datLoc Starting byte location of data block in file datSpan Data size including padding ========== ======================================================== """ if self._file is not None: output = self[index].fileinfo() if not output: # OK, the HDU associated with this index is not yet # tied to the file associated with the HDUList. The only way # to get the file object is to check each of the HDU's in the # list until we find the one associated with the file. f = None for hdu in self: info = hdu.fileinfo() if info: f = info['file'] fm = info['filemode'] break output = {'file': f, 'filemode': fm, 'hdrLoc': None, 'datLoc': None, 'datSpan': None} output['filename'] = self._file.name output['resized'] = self._wasresized() else: output = None return output def insert(self, index, hdu): """ Insert an HDU into the `HDUList` at the given ``index``. Parameters ---------- index : int Index before which to insert the new HDU. hdu : HDU object The HDU object to insert """ if not isinstance(hdu, _BaseHDU): raise ValueError('{} is not an HDU.'.format(hdu)) num_hdus = len(self) if index == 0 or num_hdus == 0: if num_hdus != 0: # We are inserting a new Primary HDU so we need to # make the current Primary HDU into an extension HDU. if isinstance(self[0], GroupsHDU): raise ValueError( "The current Primary HDU is a GroupsHDU. " "It can't be made into an extension HDU, " "so another HDU cannot be inserted before it.") hdu1 = ImageHDU(self[0].data, self[0].header) # Insert it into position 1, then delete HDU at position 0. super().insert(1, hdu1) super().__delitem__(0) if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().insert(0, phdu) index = 1 else: if isinstance(hdu, GroupsHDU): raise ValueError('A GroupsHDU must be inserted as a ' 'Primary HDU.') if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. hdu = ImageHDU(hdu.data, hdu.header) super().insert(index, hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def append(self, hdu): """ Append a new HDU to the `HDUList`. Parameters ---------- hdu : HDU object HDU to add to the `HDUList`. 
""" if not isinstance(hdu, _BaseHDU): raise ValueError('HDUList can only append an HDU.') if len(self) > 0: if isinstance(hdu, GroupsHDU): raise ValueError( "Can't append a GroupsHDU to a non-empty HDUList") if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. # TODO: This isn't necessarily sufficient to copy the HDU; # _header_offset and friends need to be copied too. hdu = ImageHDU(hdu.data, hdu.header) else: if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary # HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().append(phdu) super().append(hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def index_of(self, key): """ Get the index of an HDU from the `HDUList`. Parameters ---------- key : int, str or tuple of (string, int) The key identifying the HDU. If ``key`` is a tuple, it is of the form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous (it shouldn't be but it's not impossible) the numeric index must be used to index the duplicate HDU. Returns ------- index : int The index of the HDU in the `HDUList`. """ if _is_int(key): return key elif isinstance(key, tuple): _key, _ver = key else: _key = key _ver = None if not isinstance(_key, str): raise KeyError( '{} indices must be integers, extension names as strings, ' 'or (extname, version) tuples; got {}' ''.format(self.__class__.__name__, _key)) _key = (_key.strip()).upper() found = None for idx, hdu in enumerate(self): name = hdu.name if isinstance(name, str): name = name.strip().upper() # 'PRIMARY' should always work as a reference to the first HDU if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and (_ver is None or _ver == hdu.ver)): found = idx break if (found is None): raise KeyError('Extension {!r} not found.'.format(key)) else: return found def _positive_index_of(self, key): """ Same as index_of, but ensures always returning a positive index or zero. (Really this should be called non_negative_index_of but it felt too long.) This means that if the key is a negative integer, we have to convert it to the corresponding positive index. This means knowing the length of the HDUList, which in turn means loading all HDUs. Therefore using negative indices on HDULists is inherently inefficient. """ index = self.index_of(key) if index >= 0: return index if abs(index) > len(self): raise IndexError( 'Extension {} is out of bound or not found.'.format(index)) return len(self) + index def readall(self): """ Read data of all HDUs into memory. """ for hdu in self: if hdu.data is not None: continue @ignore_sigint def flush(self, output_verify='fix', verbose=False): """ Force a write of the `HDUList` back to the file (for append and update modes only). Parameters ---------- output_verify : str Output verification option. 
Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. verbose : bool When `True`, print verbose messages """ if self._file.mode not in ('append', 'update', 'ostream'): warnings.warn("Flush for '{}' mode is not supported." .format(self._file.mode), AstropyUserWarning) return if self._save_backup and self._file.mode in ('append', 'update'): filename = self._file.name if os.path.exists(filename): # The the file doesn't actually exist anymore for some reason # then there's no point in trying to make a backup backup = filename + '.bak' idx = 1 while os.path.exists(backup): backup = filename + '.bak.' + str(idx) idx += 1 warnings.warn('Saving a backup of {} to {}.'.format( filename, backup), AstropyUserWarning) try: shutil.copy(filename, backup) except OSError as exc: raise OSError('Failed to save backup to destination {}: ' '{}'.format(filename, exc)) self.verify(option=output_verify) if self._file.mode in ('append', 'ostream'): for hdu in self: if verbose: try: extver = str(hdu._header['extver']) except KeyError: extver = '' # only append HDU's which are "new" if hdu._new: hdu._prewriteto(checksum=hdu._output_checksum) with _free_space_check(self): hdu._writeto(self._file) if verbose: print('append HDU', hdu.name, extver) hdu._new = False hdu._postwriteto() elif self._file.mode == 'update': self._flush_update() def update_extend(self): """ Make sure that if the primary header needs the keyword ``EXTEND`` that it has it and it is correct. """ if not len(self): return if not isinstance(self[0], PrimaryHDU): # A PrimaryHDU will be automatically inserted at some point, but it # might not have been added yet return hdr = self[0].header def get_first_ext(): try: return self[1] except IndexError: return None if 'EXTEND' in hdr: if not hdr['EXTEND'] and get_first_ext() is not None: hdr['EXTEND'] = True elif get_first_ext() is not None: if hdr['NAXIS'] == 0: hdr.set('EXTEND', True, after='NAXIS') else: n = hdr['NAXIS'] hdr.set('EXTEND', True, after='NAXIS' + str(n)) @deprecated_renamed_argument('clobber', 'overwrite', '2.0') def writeto(self, fileobj, output_verify='exception', overwrite=False, checksum=False): """ Write the `HDUList` to a new file. Parameters ---------- fileobj : file path, file object or file-like object File to write to. If a file object, must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. .. versionchanged:: 1.3 ``overwrite`` replaces the deprecated ``clobber`` argument. checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the headers of all HDU's written to the file. 
""" if (len(self) == 0): warnings.warn("There is nothing to write.", AstropyUserWarning) return self.verify(option=output_verify) # make sure the EXTEND keyword is there if there is extension self.update_extend() # make note of whether the input file object is already open, in which # case we should not close it after writing (that should be the job # of the caller) closed = isinstance(fileobj, str) or fileobj_closed(fileobj) # writeto is only for writing a new file from scratch, so the most # sensible mode to require is 'ostream'. This can accept an open # file object that's open to write only, or in append/update modes # but only if the file doesn't exist. fileobj = _File(fileobj, mode='ostream', overwrite=overwrite) hdulist = self.fromfile(fileobj) try: dirname = os.path.dirname(hdulist._file.name) except AttributeError: dirname = None with _free_space_check(self, dirname=dirname): for hdu in self: hdu._prewriteto(checksum=checksum) hdu._writeto(hdulist._file) hdu._postwriteto() hdulist.close(output_verify=output_verify, closed=closed) def close(self, output_verify='exception', verbose=False, closed=True): """ Close the associated FITS file and memmap object, if any. Parameters ---------- output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`verify` for more info. verbose : bool When `True`, print out verbose messages. closed : bool When `True`, close the underlying file object. """ try: if (self._file and self._file.mode in ('append', 'update') and not self._file.closed): self.flush(output_verify=output_verify, verbose=verbose) finally: if self._file and closed and hasattr(self._file, 'close'): self._file.close() # Give individual HDUs an opportunity to do on-close cleanup for hdu in self: hdu._close(closed=closed) def info(self, output=None): """ Summarize the info of the HDUs in this `HDUList`. Note that this function prints its results to the console---it does not return a value. Parameters ---------- output : file, bool, optional A file-like object to write the output to. If `False`, does not output to a file and instead returns a list of tuples representing the HDU info. Writes to ``sys.stdout`` by default. """ if output is None: output = sys.stdout if self._file is None: name = '(No file associated with this HDUList)' else: name = self._file.name results = ['Filename: {}'.format(name), 'No. Name Ver Type Cards Dimensions Format'] format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}' default = ('', '', '', 0, (), '', '') for idx, hdu in enumerate(self): summary = hdu._summary() if len(summary) < len(default): summary += default[len(summary):] summary = (idx,) + summary if output: results.append(format.format(*summary)) else: results.append(summary) if output: output.write('\n'.join(results)) output.write('\n') output.flush() else: return results[2:] def filename(self): """ Return the file name associated with the HDUList object if one exists. Otherwise returns None. Returns ------- filename : a string containing the file name associated with the HDUList object if an association exists. Otherwise returns None. 
""" if self._file is not None: if hasattr(self._file, 'name'): return self._file.name return None @classmethod def _readfrom(cls, fileobj=None, data=None, mode=None, memmap=None, save_backup=False, cache=True, lazy_load_hdus=True, **kwargs): """ Provides the implementations from HDUList.fromfile and HDUList.fromstring, both of which wrap this method, as their implementations are largely the same. """ if fileobj is not None: if not isinstance(fileobj, _File): # instantiate a FITS file object (ffo) fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache) # The Astropy mode is determined by the _File initializer if the # supplied mode was None mode = fileobj.mode hdulist = cls(file=fileobj) else: if mode is None: # The default mode mode = 'readonly' hdulist = cls(file=data) # This method is currently only called from HDUList.fromstring and # HDUList.fromfile. If fileobj is None then this must be the # fromstring case; the data type of ``data`` will be checked in the # _BaseHDU.fromstring call. hdulist._save_backup = save_backup hdulist._open_kwargs = kwargs if fileobj is not None and fileobj.writeonly: # Output stream--not interested in reading/parsing # the HDUs--just writing to the output file return hdulist # Make sure at least the PRIMARY HDU can be read read_one = hdulist._read_next_hdu() # If we're trying to read only and no header units were found, # raise an exception if not read_one and mode in ('readonly', 'denywrite'): # Close the file if necessary (issue #6168) if hdulist._file.close_on_error: hdulist._file.close() raise OSError('Empty or corrupt FITS file') if not lazy_load_hdus: # Go ahead and load all HDUs while hdulist._read_next_hdu(): pass # initialize/reset attributes to be used in "update/append" mode hdulist._resize = False hdulist._truncate = False return hdulist def _try_while_unread_hdus(self, func, *args, **kwargs): """ Attempt an operation that accesses an HDU by index/name that can fail if not all HDUs have been read yet. Keep reading HDUs until the operation succeeds or there are no more HDUs to read. """ while True: try: return func(*args, **kwargs) except Exception: if self._read_next_hdu(): continue else: raise def _read_next_hdu(self): """ Lazily load a single HDU from the fileobj or data string the `HDUList` was opened from, unless no further HDUs are found. Returns True if a new HDU was loaded, or False otherwise. 
""" if self._read_all: return False saved_compression_enabled = compressed.COMPRESSION_ENABLED fileobj, data, kwargs = self._file, self._data, self._open_kwargs if fileobj is not None and fileobj.closed: return False try: self._in_read_next_hdu = True if ('disable_image_compression' in kwargs and kwargs['disable_image_compression']): compressed.COMPRESSION_ENABLED = False # read all HDUs try: if fileobj is not None: try: # Make sure we're back to the end of the last read # HDU if len(self) > 0: last = self[len(self) - 1] if last._data_offset is not None: offset = last._data_offset + last._data_size fileobj.seek(offset, os.SEEK_SET) hdu = _BaseHDU.readfrom(fileobj, **kwargs) except EOFError: self._read_all = True return False except OSError: # Close the file: see # https://github.com/astropy/astropy/issues/6168 # if self._file.close_on_error: self._file.close() if fileobj.writeonly: self._read_all = True return False else: raise else: if not data: self._read_all = True return False hdu = _BaseHDU.fromstring(data, **kwargs) self._data = data[hdu._data_offset + hdu._data_size:] super().append(hdu) if len(self) == 1: # Check for an extension HDU and update the EXTEND # keyword of the primary HDU accordingly self.update_extend() hdu._new = False if 'checksum' in kwargs: hdu._output_checksum = kwargs['checksum'] # check in the case there is extra space after the last HDU or # corrupted HDU except (VerifyError, ValueError) as exc: warnings.warn( 'Error validating header for HDU #{} (note: Astropy ' 'uses zero-based indexing).\n{}\n' 'There may be extra bytes after the last HDU or the ' 'file is corrupted.'.format( len(self), indent(str(exc))), VerifyWarning) del exc self._read_all = True return False finally: compressed.COMPRESSION_ENABLED = saved_compression_enabled self._in_read_next_hdu = False return True def _verify(self, option='warn'): errs = _ErrList([], unit='HDU') # the first (0th) element must be a primary HDU if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \ (not isinstance(self[0], _NonstandardHDU)): err_text = "HDUList's 0th element is not a primary HDU." fix_text = 'Fixed by inserting one as 0th HDU.' def fix(self=self): self.insert(0, PrimaryHDU()) err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) errs.append(err) if len(self) > 1 and ('EXTEND' not in self[0].header or self[0].header['EXTEND'] is not True): err_text = ('Primary HDU does not contain an EXTEND keyword ' 'equal to T even though there are extension HDUs.') fix_text = 'Fixed by inserting or updating the EXTEND keyword.' 
def fix(header=self[0].header): naxis = header['NAXIS'] if naxis == 0: after = 'NAXIS' else: after = 'NAXIS' + str(naxis) header.set('EXTEND', value=True, after=after) errs.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # each element calls their own verify for idx, hdu in enumerate(self): if idx > 0 and (not isinstance(hdu, ExtensionHDU)): err_text = ("HDUList's element {} is not an " "extension HDU.".format(str(idx))) err = self.run_option(option, err_text=err_text, fixable=False) errs.append(err) else: result = hdu._verify(option) if result: errs.append(result) return errs def _flush_update(self): """Implements flushing changes to a file in update mode.""" for hdu in self: # Need to all _prewriteto() for each HDU first to determine if # resizing will be necessary hdu._prewriteto(checksum=hdu._output_checksum, inplace=True) try: self._wasresized() # if the HDUList is resized, need to write out the entire contents of # the hdulist to the file. if self._resize or self._file.compression: self._flush_resize() else: # if not resized, update in place for hdu in self: hdu._writeto(self._file, inplace=True) # reset the modification attributes after updating for hdu in self: hdu._header._modified = False finally: for hdu in self: hdu._postwriteto() def _flush_resize(self): """ Implements flushing changes in update mode when parts of one or more HDU need to be resized. """ old_name = self._file.name old_memmap = self._file.memmap name = _tmp_name(old_name) if not self._file.file_like: old_mode = os.stat(old_name).st_mode # The underlying file is an actual file object. The HDUList is # resized, so we need to write it to a tmp file, delete the # original file, and rename the tmp file to the original file. if self._file.compression == 'gzip': new_file = gzip.GzipFile(name, mode='ab+') elif self._file.compression == 'bzip2': new_file = bz2.BZ2File(name, mode='w') else: new_file = name with self.fromfile(new_file, mode='append') as hdulist: for hdu in self: hdu._writeto(hdulist._file, inplace=True, copy=True) if sys.platform.startswith('win'): # Collect a list of open mmaps to the data; this well be # used later. See below. mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data) for idx, hdu in enumerate(self) if hdu._has_data] hdulist._file.close() self._file.close() if sys.platform.startswith('win'): # Close all open mmaps to the data. This is only necessary on # Windows, which will not allow a file to be renamed or deleted # until all handles to that file have been closed. for idx, mmap, arr in mmaps: if mmap is not None: mmap.close() os.remove(self._file.name) # reopen the renamed new file with "update" mode os.rename(name, old_name) os.chmod(old_name, old_mode) if isinstance(new_file, gzip.GzipFile): old_file = gzip.GzipFile(old_name, mode='rb+') else: old_file = old_name ffo = _File(old_file, mode='update', memmap=old_memmap) self._file = ffo for hdu in self: # Need to update the _file attribute and close any open mmaps # on each HDU if hdu._has_data and _get_array_mmap(hdu.data) is not None: del hdu.data hdu._file = ffo if sys.platform.startswith('win'): # On Windows, all the original data mmaps were closed above. # However, it's possible that the user still has references to # the old data which would no longer work (possibly even cause # a segfault if they try to access it). This replaces the # buffers used by the original arrays with the buffers of mmap # arrays created from the new file. 
This seems to work, but # it's a flaming hack and carries no guarantees that it won't # lead to odd behavior in practice. Better to just not keep # references to data from files that had to be resized upon # flushing (on Windows--again, this is no problem on Linux). for idx, mmap, arr in mmaps: if mmap is not None: arr.data = self[idx].data.data del mmaps # Just to be sure else: # The underlying file is not a file object, it is a file like # object. We can't write out to a file, we must update the file # like object in place. To do this, we write out to a temporary # file, then delete the contents in our file like object, then # write the contents of the temporary file to the now empty file # like object. self.writeto(name) hdulist = self.fromfile(name) ffo = self._file ffo.truncate(0) ffo.seek(0) for hdu in hdulist: hdu._writeto(ffo, inplace=True, copy=True) # Close the temporary file and delete it. hdulist.close() os.remove(hdulist._file.name) # reset the resize attributes after updating self._resize = False self._truncate = False for hdu in self: hdu._header._modified = False hdu._new = False hdu._file = ffo def _wasresized(self, verbose=False): """ Determine if any changes to the HDUList will require a file resize when flushing the file. Side effect of setting the objects _resize attribute. """ if not self._resize: # determine if any of the HDU is resized for hdu in self: # Header: nbytes = len(str(hdu._header)) if nbytes != (hdu._data_offset - hdu._header_offset): self._resize = True self._truncate = False if verbose: print('One or more header is resized.') break # Data: if not hdu._has_data: continue nbytes = hdu.size nbytes = nbytes + _pad_length(nbytes) if nbytes != hdu._data_size: self._resize = True self._truncate = False if verbose: print('One or more data area is resized.') break if self._truncate: try: self._file.truncate(hdu._data_offset + hdu._data_size) except OSError: self._resize = True self._truncate = False return self._resize
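# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# hedged demonstration of the lazy-loading HDUList behaviour implemented
# above, as seen from the caller's side.  The file name 'lazy_example.fits'
# is a hypothetical placeholder; the sketch only uses names already defined
# or imported in this module (PrimaryHDU, ImageHDU, HDUList, fitsopen, np,
# os) and is guarded so it never runs on import.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Build a small multi-extension file so there is something to lazy-load.
    example = 'lazy_example.fits'
    hdul = HDUList([PrimaryHDU(),
                    ImageHDU(data=np.zeros((2, 2)), name='SCI')])
    hdul.writeto(example, overwrite=True)

    # With lazy_load_hdus left at its default, HDUs are read on demand;
    # len() forces the remaining HDUs to be scanned, as __len__ above shows.
    with fitsopen(example) as f:
        sci = f['SCI']      # index_of() resolves EXTNAME lookups lazily
        n_hdus = len(f)     # reads whatever has not been loaded yet
        print(n_hdus, sci.name)

    # lazy_load_hdus=False instead reads every HDU immediately on open.
    with fitsopen(example, lazy_load_hdus=False) as f:
        f.info()

    os.remove(example)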
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import os from .base import _BaseHDU, BITPIX2DTYPE from .hdulist import HDUList from .image import PrimaryHDU from ..file import _File from ..header import _pad_length from ..util import fileobj_name class StreamingHDU: """ A class that provides the capability to stream data to a FITS file instead of requiring data to all be written at once. The following pseudocode illustrates its use:: header = astropy.io.fits.Header() for all the cards you need in the header: header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data: shdu.write(data) shdu.close() """ def __init__(self, name, header): """ Construct a `StreamingHDU` object given a file name and a header. Parameters ---------- name : file path, file object, or file like object The file to which the header and data will be streamed. If opened, the file object must be opened in a writeable binary mode such as 'wb' or 'ab+'. header : `Header` instance The header object associated with the data to be written to the file. Notes ----- The file will be opened and the header appended to the end of the file. If the file does not already exist, it will be created, and if the header represents a Primary header, it will be written to the beginning of the file. If the file does not exist and the provided header is not a Primary header, a default Primary HDU will be inserted at the beginning of the file and the provided header will be added as the first extension. If the file does already exist, but the provided header represents a Primary header, the header will be modified to an image extension header and appended to the end of the file. """ if isinstance(name, gzip.GzipFile): raise TypeError('StreamingHDU not supported for GzipFile objects.') self._header = header.copy() # handle a file object instead of a file name filename = fileobj_name(name) or '' # Check if the file already exists. If it does not, check to see # if we were provided with a Primary Header. If not we will need # to prepend a default PrimaryHDU to the file before writing the # given header. newfile = False if filename: if not os.path.exists(filename) or os.path.getsize(filename) == 0: newfile = True elif (hasattr(name, 'len') and name.len == 0): newfile = True if newfile: if 'SIMPLE' not in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: # This will not be the first extension in the file so we # must change the Primary header provided into an image # extension header. 
if 'SIMPLE' in self._header: self._header.set('XTENSION', 'IMAGE', 'Image extension', after='SIMPLE') del self._header['SIMPLE'] if 'PCOUNT' not in self._header: dim = self._header['NAXIS'] if dim == 0: dim = '' else: dim = str(dim) self._header.set('PCOUNT', 0, 'number of parameters', after='NAXIS' + dim) if 'GCOUNT' not in self._header: self._header.set('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') # TODO : Fix this once the HDU writing API is cleaned up tmp_hdu = _BaseHDU() # Passing self._header as an argument to _BaseHDU() will cause its # values to be modified in undesired ways...need to have a better way # of doing this tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size = self.size if self._size != 0: self.writecomplete = False else: self.writecomplete = True # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def write(self, data): """ Write the given data to the stream. Parameters ---------- data : ndarray Data to stream to the file. Returns ------- writecomplete : int Flag that when `True` indicates that all of the required data has been written to the stream. Notes ----- Only the amount of data specified in the header provided to the class constructor may be written to the stream. If the provided data would cause the stream to overflow, an `OSError` exception is raised and the data is not written. Once sufficient data has been written to the stream to satisfy the amount specified in the header, the stream is padded to fill a complete FITS block and no more data will be accepted. An attempt to write more data after the stream has been filled will raise an `OSError` exception. If the dtype of the input data does not match what is expected by the header, a `TypeError` exception is raised. """ size = self._ffo.tell() - self._data_offset if self.writecomplete or size + data.nbytes > self._size: raise OSError('Attempt to write more data to the stream than the ' 'header specified.') if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name: raise TypeError('Supplied data does not match the type specified ' 'in the header.') if data.dtype.str[0] != '>': # byteswap little endian arrays before writing output = data.byteswap() else: output = data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset == self._size: # the stream is full so pad the data to the next FITS block self._ffo.write(_pad_length(self._size) * '\0') self.writecomplete = True self._ffo.flush() return self.writecomplete @property def size(self): """ Return the size (in bytes) of the data portion of the HDU. """ size = 0 naxis = self._header.get('NAXIS', 0) if naxis > 0: simple = self._header.get('SIMPLE', 'F') random_groups = self._header.get('GROUPS', 'F') if simple == 'T' and random_groups == 'T': groups = 1 else: groups = 0 size = 1 for idx in range(groups, naxis): size = size * self._header['NAXIS' + str(idx + 1)] bitpix = self._header['BITPIX'] gcount = self._header.get('GCOUNT', 1) pcount = self._header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount + size) // 8 return size def close(self): """ Close the physical FITS file. """ self._ffo.close()
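# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a hedged,
# minimal example of streaming data through StreamingHDU, following the
# pseudocode in the class docstring above.  The file name
# 'stream_example.fits' and the array shape are hypothetical placeholders;
# the header must fully describe the data to be streamed (BITPIX, NAXIS,
# NAXISn), since write() checks both the dtype and the total size against
# it.  Guarded so it never runs on import.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    from astropy.io import fits

    header = fits.Header()
    header['SIMPLE'] = True
    header['BITPIX'] = 32        # data will be 32-bit signed integers
    header['NAXIS'] = 2
    header['NAXIS1'] = 100
    header['NAXIS2'] = 10

    shdu = fits.StreamingHDU('stream_example.fits', header)
    try:
        # Stream one row at a time; write() returns True once the amount of
        # data promised by the header has been written and padded out to a
        # full FITS block.
        for _ in range(10):
            complete = shdu.write(
                np.arange(100, dtype=np.int32).reshape(1, 100))
    finally:
        shdu.close()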
# Licensed under a 3-clause BSD style license - see PYFITS.rst import contextlib import copy import gc import pickle import re import pytest import numpy as np from numpy import char as chararray try: import objgraph HAVE_OBJGRAPH = True except ImportError: HAVE_OBJGRAPH = False from ....io import fits from ....tests.helper import catch_warnings, ignore_warnings from ....utils.exceptions import AstropyDeprecationWarning from ..column import Delayed, NUMPY2FITS from ..util import decode_ascii from ..verify import VerifyError from . import FitsTestCase def comparefloats(a, b): """ Compare two float scalars or arrays and see if they are consistent Consistency is determined ensuring the difference is less than the expected amount. Return True if consistent, False if any differences. """ aa = a bb = b # compute expected precision if aa.dtype.name == 'float32' or bb.dtype.name == 'float32': precision = 0.000001 else: precision = 0.0000000000000001 precision = 0.00001 # until precision problem is fixed in astropy.io.fits diff = np.absolute(aa - bb) mask0 = aa == 0 masknz = aa != 0. if np.any(mask0): if diff[mask0].max() != 0.: return False if np.any(masknz): if (diff[masknz] / np.absolute(aa[masknz])).max() > precision: return False return True def comparerecords(a, b): """ Compare two record arrays Does this field by field, using approximation testing for float columns (Complex not yet handled.) Column names not compared, but column types and sizes are. """ nfieldsa = len(a.dtype.names) nfieldsb = len(b.dtype.names) if nfieldsa != nfieldsb: print("number of fields don't match") return False for i in range(nfieldsa): fielda = a.field(i) fieldb = b.field(i) if fielda.dtype.char == 'S': fielda = decode_ascii(fielda) if fieldb.dtype.char == 'S': fieldb = decode_ascii(fieldb) if (not isinstance(fielda, type(fieldb)) and not isinstance(fieldb, type(fielda))): print("type(fielda): ", type(fielda), " fielda: ", fielda) print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb) print('field {0} type differs'.format(i)) return False if len(fielda) and isinstance(fielda[0], np.floating): if not comparefloats(fielda, fieldb): print("fielda: ", fielda) print("fieldb: ", fieldb) print('field {0} differs'.format(i)) return False elif (isinstance(fielda, fits.column._VLF) or isinstance(fieldb, fits.column._VLF)): for row in range(len(fielda)): if np.any(fielda[row] != fieldb[row]): print('fielda[{0}]: {1}'.format(row, fielda[row])) print('fieldb[{0}]: {1}'.format(row, fieldb[row])) print('field {0} differs in row {1}'.format(i, row)) else: if np.any(fielda != fieldb): print("fielda: ", fielda) print("fieldb: ", fieldb) print('field {0} differs'.format(i)) return False return True class TestTableFunctions(FitsTestCase): def test_constructor_copies_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153 Ensure that a header from one HDU is copied when used to initialize new HDU. This is like the test of the same name in test_image, but tests this for tables as well. 
""" ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()]) thdr = ifd[1].header thdr['FILENAME'] = 'labq01i3q_rawtag.fits' thdu = fits.BinTableHDU(header=thdr) ofd = fits.HDUList(thdu) ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits' # Original header should be unchanged assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits' def test_open(self): # open some existing FITS files: tt = fits.open(self.data('tb.fits')) fd = fits.open(self.data('test0.fits')) # create some local arrays a1 = chararray.array(['abc', 'def', 'xx']) r1 = np.array([11., 12., 13.], dtype=np.float32) # create a table from scratch, using a mixture of columns from existing # tables and locally created arrays: # first, create individual column definitions c1 = fits.Column(name='abc', format='3A', array=a1) c2 = fits.Column(name='def', format='E', array=r1) a3 = np.array([3, 4, 5], dtype='i2') c3 = fits.Column(name='xyz', format='I', array=a3) a4 = np.array([1, 2, 3], dtype='i2') c4 = fits.Column(name='t1', format='I', array=a4) a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8') c5 = fits.Column(name='t2', format='C', array=a5) # Note that X format must be two-D array a6 = np.array([[0], [1], [0]], dtype=np.uint8) c6 = fits.Column(name='t3', format='X', array=a6) a7 = np.array([101, 102, 103], dtype='i4') c7 = fits.Column(name='t4', format='J', array=a7) a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1], [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0], [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8) c8 = fits.Column(name='t5', format='11X', array=a8) # second, create a column-definitions object for all columns in a table x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8]) tbhdu = fits.BinTableHDU.from_columns(x) # another way to create a table is by using existing table's # information: x2 = fits.ColDefs(tt[1]) t2 = fits.BinTableHDU.from_columns(x2, nrows=2) ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), (2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4') assert comparerecords(t2.data, ra) # the table HDU's data is a subclass of a record array, so we can # access one row like this: assert tbhdu.data[1][0] == a1[1] assert tbhdu.data[1][1] == r1[1] assert tbhdu.data[1][2] == a3[1] assert tbhdu.data[1][3] == a4[1] assert tbhdu.data[1][4] == a5[1] assert (tbhdu.data[1][5] == a6[1].view('bool')).all() assert tbhdu.data[1][6] == a7[1] assert (tbhdu.data[1][7] == a8[1]).all() # and a column like this: assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']" # An alternative way to create a column-definitions object is from an # existing table. 
xx = fits.ColDefs(tt[1]) # now we write out the newly created table HDU to a FITS file: fout = fits.HDUList(fits.PrimaryHDU()) fout.append(tbhdu) fout.writeto(self.temp('tableout1.fits'), overwrite=True) with fits.open(self.temp('tableout1.fits')) as f2: temp = f2[1].data.field(7) assert (temp[0] == [True, True, False, True, False, True, True, True, False, False, True]).all() # An alternative way to create an output table FITS file: fout2 = fits.open(self.temp('tableout2.fits'), 'append') fout2.append(fd[0]) fout2.append(tbhdu) fout2.close() tt.close() fd.close() def test_binary_table(self): # binary table: t = fits.open(self.data('tb.fits')) assert t[1].header['tform1'] == '1J' info = {'name': ['c1', 'c2', 'c3', 'c4'], 'format': ['1J', '3A', '1E', '1L'], 'unit': ['', '', '', ''], 'null': [-2147483647, '', '', ''], 'bscale': ['', '', 3, ''], 'bzero': ['', '', 0.4, ''], 'disp': ['I11', 'A3', 'G15.7', 'L6'], 'start': ['', '', '', ''], 'dim': ['', '', '', ''], 'coord_inc': ['', '', '', ''], 'coord_type': ['', '', '', ''], 'coord_unit': ['', '', '', ''], 'coord_ref_point': ['', '', '', ''], 'coord_ref_value': ['', '', '', ''], 'time_ref_pos': ['', '', '', '']} assert t[1].columns.info(output=False) == info ra = np.rec.array([ (1, 'abc', 3.7000002861022949, 0), (2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4') assert comparerecords(t[1].data, ra[:2]) # Change scaled field and scale back to the original array t[1].data.field('c4')[0] = 1 t[1].data._scale_back() assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]' # look at data column-wise assert (t[1].data.field(0) == np.array([1, 2])).all() # When there are scaled columns, the raw data are in data._parent t.close() def test_ascii_table(self): # ASCII table a = fits.open(self.data('ascii.fits')) ra1 = np.rec.array([ (10.123000144958496, 37), (5.1999998092651367, 23), (15.609999656677246, 17), (0.0, 0), (345.0, 345)], names='c1, c2') assert comparerecords(a[1].data, ra1) # Test slicing a2 = a[1].data[2:][2:] ra2 = np.rec.array([(345.0, 345)], names='c1, c2') assert comparerecords(a2, ra2) assert (a2.field(1) == np.array([345])).all() ra3 = np.rec.array([ (10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345) ], names='c1, c2') assert comparerecords(a[1].data[::2], ra3) # Test Start Column a1 = chararray.array(['abcd', 'def']) r1 = np.array([11., 12.]) c1 = fits.Column(name='abc', format='A3', start=19, array=a1) c2 = fits.Column(name='def', format='E', start=3, array=r1) c3 = fits.Column(name='t1', format='I', array=[91, 92, 93]) hdu = fits.TableHDU.from_columns([c2, c1, c3]) assert (dict(hdu.data.dtype.fields) == {'abc': (np.dtype('|S3'), 18), 'def': (np.dtype('|S15'), 2), 't1': (np.dtype('|S10'), 21)}) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) hdul.close() # Test Scaling r1 = np.array([11., 12.]) c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3, bzero=0.6) hdu = fits.TableHDU.from_columns([c2]) hdu.writeto(self.temp('toto.fits'), overwrite=True) with open(self.temp('toto.fits')) as f: assert '4.95652173913043548D+00' in f.read() with fits.open(self.temp('toto.fits')) as hdul: assert comparerecords(hdu.data, hdul[1].data) a.close() def test_endianness(self): x = np.ndarray((1,), dtype=object) channelsIn = np.array([3], dtype='uint8') x[0] = channelsIn col = fits.Column(name="Channels", format="PB()", array=x) cols = fits.ColDefs([col]) tbhdu = fits.BinTableHDU.from_columns(cols) tbhdu.name = "RFI" 
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True) hduL = fits.open(self.temp('testendian.fits')) rfiHDU = hduL['RFI'] data = rfiHDU.data channelsOut = data.field('Channels')[0] assert (channelsIn == channelsOut).all() hduL.close() def test_column_endianness(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77 (Astropy doesn't preserve byte order of non-native order column arrays) """ a = [1., 2., 3., 4.] a1 = np.array(a, dtype='<f8') a2 = np.array(a, dtype='>f8') col1 = fits.Column(name='a', format='D', array=a1) col2 = fits.Column(name='b', format='D', array=a2) cols = fits.ColDefs([col1, col2]) tbhdu = fits.BinTableHDU.from_columns(cols) assert (tbhdu.data['a'] == a1).all() assert (tbhdu.data['b'] == a2).all() # Double check that the array is converted to the correct byte-order # for FITS (big-endian). tbhdu.writeto(self.temp('testendian.fits'), overwrite=True) with fits.open(self.temp('testendian.fits')) as hdul: assert (hdul[1].data['a'] == a2).all() assert (hdul[1].data['b'] == a2).all() def test_recarray_to_bintablehdu(self): bright = np.rec.array( [(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], formats='int16,a20,float32,a10', names='order,name,mag,Sp') hdu = fits.BinTableHDU(bright) assert comparerecords(hdu.data, bright) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) assert comparerecords(bright, hdul[1].data) hdul.close() def test_numpy_ndarray_to_bintablehdu(self): desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'], 'formats': ['int', 'S20', 'float32', 'S10']}) a = np.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc) hdu = fits.BinTableHDU(a) assert comparerecords(hdu.data, a.view(fits.FITS_rec)) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) hdul.close() def test_numpy_ndarray_to_bintablehdu_with_unicode(self): desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'], 'formats': ['int', 'U20', 'float32', 'U10']}) a = np.array([(1, u'Serius', -1.45, u'A1V'), (2, u'Canopys', -0.73, u'F0Ib'), (3, u'Rigil Kent', -0.1, u'G2V')], dtype=desc) hdu = fits.BinTableHDU(a) assert comparerecords(hdu.data, a.view(fits.FITS_rec)) hdu.writeto(self.temp('toto.fits'), overwrite=True) hdul = fits.open(self.temp('toto.fits')) assert comparerecords(hdu.data, hdul[1].data) hdul.close() def test_new_table_from_recarray(self): bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib'), (3, 'Rigil Kent', -0.1, 'G2V')], formats='int16,a20,float64,a10', names='order,name,mag,Sp') hdu = fits.TableHDU.from_columns(bright, nrows=2) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.data._coldefs._arrays[0])) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns.columns[0].array)) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])) # Ensure I can change the value of one data element and it effects # all of the others. 
hdu.data[0][0] = 213 assert hdu.data[0][0] == 213 assert hdu.data._coldefs._arrays[0][0] == 213 assert hdu.data._coldefs.columns[0].array[0] == 213 assert hdu.columns._arrays[0][0] == 213 assert hdu.columns.columns[0].array[0] == 213 hdu.data._coldefs._arrays[0][0] = 100 assert hdu.data[0][0] == 100 assert hdu.data._coldefs._arrays[0][0] == 100 assert hdu.data._coldefs.columns[0].array[0] == 100 assert hdu.columns._arrays[0][0] == 100 assert hdu.columns.columns[0].array[0] == 100 hdu.data._coldefs.columns[0].array[0] = 500 assert hdu.data[0][0] == 500 assert hdu.data._coldefs._arrays[0][0] == 500 assert hdu.data._coldefs.columns[0].array[0] == 500 assert hdu.columns._arrays[0][0] == 500 assert hdu.columns.columns[0].array[0] == 500 hdu.columns._arrays[0][0] = 600 assert hdu.data[0][0] == 600 assert hdu.data._coldefs._arrays[0][0] == 600 assert hdu.data._coldefs.columns[0].array[0] == 600 assert hdu.columns._arrays[0][0] == 600 assert hdu.columns.columns[0].array[0] == 600 hdu.columns.columns[0].array[0] = 800 assert hdu.data[0][0] == 800 assert hdu.data._coldefs._arrays[0][0] == 800 assert hdu.data._coldefs.columns[0].array[0] == 800 assert hdu.columns._arrays[0][0] == 800 assert hdu.columns.columns[0].array[0] == 800 assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all() assert hdu.data[0][1] == 'Serius' assert hdu.data[1][1] == 'Canopys' assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all() assert hdu.data[0][3] == 'A1V' assert hdu.data[1][3] == 'F0Ib' with ignore_warnings(): hdu.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as hdul: assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all() assert hdul[1].data[0][1] == 'Serius' assert hdul[1].data[1][1] == 'Canopys' assert (hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all() assert hdul[1].data[0][3] == 'A1V' assert hdul[1].data[1][3] == 'F0Ib' del hdul hdu = fits.BinTableHDU.from_columns(bright, nrows=2) tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'), (2, 'Canopys', -0.73, 'F0Ib')], formats='int16,a20,float64,a10', names='order,name,mag,Sp') assert comparerecords(hdu.data, tmp) with ignore_warnings(): hdu.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as hdul: assert comparerecords(hdu.data, hdul[1].data) def test_new_fitsrec(self): """ Tests creating a new FITS_rec object from a multi-field ndarray. """ h = fits.open(self.data('tb.fits')) data = h[1].data new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype) appended = np.append(data, new_data).view(fits.FITS_rec) assert repr(appended).startswith('FITS_rec(') # This test used to check the entire string representation of FITS_rec, # but that has problems between different numpy versions. 
Instead just # check that the FITS_rec was created, and we'll let subsequent tests # worry about checking values and such def test_appending_a_column(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) counts = np.array([412, 434, 408, 417]) names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table2.fits')) # Append the rows of table 2 after the rows of table 1 # The column definitions are assumed to be the same # Open the two files we want to append t1 = fits.open(self.temp('table1.fits')) t2 = fits.open(self.temp('table2.fits')) # Get the number of rows in the table from the first file nrows1 = t1[1].data.shape[0] # Get the total number of rows in the resulting appended table nrows = t1[1].data.shape[0] + t2[1].data.shape[0] assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array) # Create a new table that consists of the data from the first table # but has enough space in the ndarray to hold the data from both tables hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows) # For each column in the tables append the data from table 2 after the # data from table 1. 
for i in range(len(t1[1].columns)): hdu.data.field(i)[nrows1:] = t2[1].data.field(i) hdu.writeto(self.temp('newtable.fits')) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]', '')] assert fits.info(self.temp('newtable.fits'), output=False) == info z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True), ('NGC2', 334, '', z, False), ('NGC3', 308, '', z, True), ('NCG4', 317, '', z, True), ('NGC5', 412, '', z, False), ('NGC6', 434, '', z, True), ('NGC7', 408, '', z, False), ('NCG8', 417, '', z, False)], formats='a10,u4,a10,5f4,l') assert comparerecords(hdu.data, array) # Verify that all of the references to the data point to the same # numarray hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 # Same verification from the file hdul = fits.open(self.temp('newtable.fits')) hdu = hdul[1] hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 t1.close() t2.close() hdul.close() def test_adding_a_column(self): # Tests adding a column to a table. 
counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4]) tbhdu = fits.BinTableHDU.from_columns(coldefs) assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum'] coldefs1 = coldefs + c5 tbhdu1 = fits.BinTableHDU.from_columns(coldefs1) assert tbhdu1.columns.names == ['target', 'counts', 'notes', 'spectrum', 'flag'] z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True), ('NGC2', 334, '', z, False), ('NGC3', 308, '', z, True), ('NCG4', 317, '', z, True)], formats='a10,u4,a10,5f4,l') assert comparerecords(tbhdu1.data, array) def test_merge_tables(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) counts = np.array([412, 434, 408, 417]) names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8']) c1 = fits.Column(name='target1', format='10A', array=names) c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes1', format='A10') c4 = fits.Column(name='spectrum1', format='5E') c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table2.fits')) # Merge the columns of table 2 after the columns of table 1 # The column names are assumed to be different # Open the two files we want to append t1 = fits.open(self.temp('table1.fits')) t2 = fits.open(self.temp('table2.fits')) hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns) z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False), ('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True), ('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False), ('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)], formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l') assert comparerecords(hdu.data, array) hdu.writeto(self.temp('newtable.fits')) # Verify that all of the references to the data point to the same # numarray hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 
90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 30, '4R x 10C', '[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')] assert fits.info(self.temp('newtable.fits'), output=False) == info hdul = fits.open(self.temp('newtable.fits')) hdu = hdul[1] assert (hdu.columns.names == ['target', 'counts', 'notes', 'spectrum', 'flag', 'target1', 'counts1', 'notes1', 'spectrum1', 'flag1']) z = np.array([0., 0., 0., 0., 0.], dtype=np.float32) array = np.rec.array( [('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False), ('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True), ('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False), ('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)], formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l') assert comparerecords(hdu.data, array) # Same verification from the file hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 t1.close() t2.close() hdul.close() def test_modify_column_attributes(self): """Regression test for https://github.com/astropy/astropy/issues/996 This just tests one particular use case, but it should apply pretty well to other similar cases. 
""" NULLS = {'a': 2, 'b': 'b', 'c': 2.3} data = np.array(list(zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'], [2.3, 4.5, 6.7, 8.9])), dtype=[('a', int), ('b', 'S1'), ('c', float)]) b = fits.BinTableHDU(data=data) for col in b.columns: col.null = NULLS[col.name] b.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as hdul: header = hdul[1].header assert header['TNULL1'] == 2 assert header['TNULL2'] == 'b' assert header['TNULL3'] == 2.3 def test_mask_array(self): t = fits.open(self.data('table.fits')) tbdata = t[1].data mask = tbdata.field('V_mag') > 12 newtbdata = tbdata[mask] hdu = fits.BinTableHDU(newtbdata) hdu.writeto(self.temp('newtable.fits')) hdul = fits.open(self.temp('newtable.fits')) # numpy >= 1.12 changes how structured arrays are printed, so we # match to a regex rather than a specific string. expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]" assert re.match(expect, str(hdu.data)) assert re.match(expect, str(hdul[1].data)) t.close() hdul.close() def test_slice_a_row(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) row = t1[1].data[2] assert row['counts'] == 308 a, b, c = row[1:4] assert a == counts[2] assert b == '' assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() row['counts'] = 310 assert row['counts'] == 310 row[1] = 315 assert row['counts'] == 315 assert row[1:4]['counts'] == 315 pytest.raises(KeyError, lambda r: r[1:4]['flag'], row) row[1:4]['counts'] = 300 assert row[1:4]['counts'] == 300 assert row['counts'] == 300 row[1:4][0] = 400 assert row[1:4]['counts'] == 400 row[1:4]['counts'] = 300 assert row[1:4]['counts'] == 300 # Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59 row[1:4][::-1][-1] = 500 assert row[1:4]['counts'] == 500 row[1:4:2][0] = 300 assert row[1:4]['counts'] == 300 pytest.raises(KeyError, lambda r: r[1:4]['flag'], row) assert row[1:4].field(0) == 300 assert row[1:4].field('counts') == 300 pytest.raises(KeyError, row[1:4].field, 'flag') row[1:4].setfield('counts', 500) assert row[1:4].field(0) == 500 pytest.raises(KeyError, row[1:4].setfield, 'flag', False) assert t1[1].data._coldefs._arrays[1][2] == 500 assert t1[1].data._coldefs.columns[1].array[2] == 500 assert t1[1].columns._arrays[1][2] == 500 assert t1[1].columns.columns[1].array[2] == 500 assert t1[1].data[2][1] == 500 t1.close() def test_fits_record_len(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) assert len(t1[1].data[0]) == 5 assert len(t1[1].data[0][0:4]) == 4 assert len(t1[1].data[0][0:5]) == 5 assert 
len(t1[1].data[0][0:6]) == 5 assert len(t1[1].data[0][0:7]) == 5 assert len(t1[1].data[0][1:4]) == 3 assert len(t1[1].data[0][1:5]) == 4 assert len(t1[1].data[0][1:6]) == 4 assert len(t1[1].data[0][1:7]) == 4 t1.close() def test_add_data_by_rows(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) c1 = fits.Column(name='target', format='10A') c2 = fits.Column(name='counts', format='J', unit='DN') c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L') coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5) # Test assigning data to a tables row using a FITS_record tbhdu.data[0] = tbhdu1.data[0] tbhdu.data[4] = tbhdu1.data[3] # Test assigning data to a tables row using a tuple tbhdu.data[2] = ('NGC1', 312, 'A Note', np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32), True) # Test assigning data to a tables row using a list tbhdu.data[3] = ['JIM1', '33', 'A Note', np.array([1., 2., 3., 4., 5.], dtype=np.float32), True] # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.data._coldefs._arrays[0])) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns.columns[0].array)) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])) assert tbhdu.data[0][1] == 312 assert tbhdu.data._coldefs._arrays[1][0] == 312 assert tbhdu.data._coldefs.columns[1].array[0] == 312 assert tbhdu.columns._arrays[1][0] == 312 assert tbhdu.columns.columns[1].array[0] == 312 assert tbhdu.columns.columns[0].array[0] == 'NGC1' assert tbhdu.columns.columns[2].array[0] == '' assert (tbhdu.columns.columns[3].array[0] == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() assert tbhdu.columns.columns[4].array[0] == True # nopep8 assert tbhdu.data[3][1] == 33 assert tbhdu.data._coldefs._arrays[1][3] == 33 assert tbhdu.data._coldefs.columns[1].array[3] == 33 assert tbhdu.columns._arrays[1][3] == 33 assert tbhdu.columns.columns[1].array[3] == 33 assert tbhdu.columns.columns[0].array[3] == 'JIM1' assert tbhdu.columns.columns[2].array[3] == 'A Note' assert (tbhdu.columns.columns[3].array[3] == np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all() assert tbhdu.columns.columns[4].array[3] == True # nopep8 def test_assign_multiple_rows_to_table(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) counts = np.array([112, 134, 108, 117]) names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = 
fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32) tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9) # Assign the 4 rows from the second table to rows 5 thru 8 of the # new table. Note that the last row of the new table will still be # initialized to the default values. tbhdu2.data[4:] = tbhdu.data # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(tbhdu2.data._coldefs.columns[0].array) == id(tbhdu2.data._coldefs._arrays[0])) assert (id(tbhdu2.data._coldefs.columns[0].array) == id(tbhdu2.columns.columns[0].array)) assert (id(tbhdu2.data._coldefs.columns[0].array) == id(tbhdu2.columns._arrays[0])) assert tbhdu2.data[0][1] == 312 assert tbhdu2.data._coldefs._arrays[1][0] == 312 assert tbhdu2.data._coldefs.columns[1].array[0] == 312 assert tbhdu2.columns._arrays[1][0] == 312 assert tbhdu2.columns.columns[1].array[0] == 312 assert tbhdu2.columns.columns[0].array[0] == 'NGC1' assert tbhdu2.columns.columns[2].array[0] == '' assert (tbhdu2.columns.columns[3].array[0] == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() assert tbhdu2.columns.columns[4].array[0] == True # nopep8 assert tbhdu2.data[4][1] == 112 assert tbhdu2.data._coldefs._arrays[1][4] == 112 assert tbhdu2.data._coldefs.columns[1].array[4] == 112 assert tbhdu2.columns._arrays[1][4] == 112 assert tbhdu2.columns.columns[1].array[4] == 112 assert tbhdu2.columns.columns[0].array[4] == 'NGC5' assert tbhdu2.columns.columns[2].array[4] == '' assert (tbhdu2.columns.columns[3].array[4] == np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all() assert tbhdu2.columns.columns[4].array[4] == False # nopep8 assert tbhdu2.columns.columns[1].array[8] == 0 assert tbhdu2.columns.columns[0].array[8] == '' assert tbhdu2.columns.columns[2].array[8] == '' assert (tbhdu2.columns.columns[3].array[8] == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all() assert tbhdu2.columns.columns[4].array[8] == False # nopep8 def test_verify_data_references(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) # Verify that original ColDefs object has independent Column # objects. assert id(coldefs.columns[0]) != id(c1) # Verify that original ColDefs object has independent ndarray # objects. assert id(coldefs.columns[0].array) != id(names) # Verify that original ColDefs object references the same data # object as the original Column object. assert id(coldefs.columns[0].array) == id(c1.array) assert id(coldefs.columns[0].array) == id(coldefs._arrays[0]) # Verify new HDU has an independent ColDefs object. assert id(coldefs) != id(tbhdu.columns) # Verify new HDU has independent Column objects. assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0]) # Verify new HDU has independent ndarray objects. assert (id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)) # Verify that both ColDefs objects in the HDU reference the same # Coldefs object. 
assert id(tbhdu.columns) == id(tbhdu.data._coldefs) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.data._coldefs._arrays[0])) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns.columns[0].array)) assert (id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])) tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) t1[1].data[0][1] = 213 assert t1[1].data[0][1] == 213 assert t1[1].data._coldefs._arrays[1][0] == 213 assert t1[1].data._coldefs.columns[1].array[0] == 213 assert t1[1].columns._arrays[1][0] == 213 assert t1[1].columns.columns[1].array[0] == 213 t1[1].data._coldefs._arrays[1][0] = 100 assert t1[1].data[0][1] == 100 assert t1[1].data._coldefs._arrays[1][0] == 100 assert t1[1].data._coldefs.columns[1].array[0] == 100 assert t1[1].columns._arrays[1][0] == 100 assert t1[1].columns.columns[1].array[0] == 100 t1[1].data._coldefs.columns[1].array[0] = 500 assert t1[1].data[0][1] == 500 assert t1[1].data._coldefs._arrays[1][0] == 500 assert t1[1].data._coldefs.columns[1].array[0] == 500 assert t1[1].columns._arrays[1][0] == 500 assert t1[1].columns.columns[1].array[0] == 500 t1[1].columns._arrays[1][0] = 600 assert t1[1].data[0][1] == 600 assert t1[1].data._coldefs._arrays[1][0] == 600 assert t1[1].data._coldefs.columns[1].array[0] == 600 assert t1[1].columns._arrays[1][0] == 600 assert t1[1].columns.columns[1].array[0] == 600 t1[1].columns.columns[1].array[0] = 800 assert t1[1].data[0][1] == 800 assert t1[1].data._coldefs._arrays[1][0] == 800 assert t1[1].data._coldefs.columns[1].array[0] == 800 assert t1[1].columns._arrays[1][0] == 800 assert t1[1].columns.columns[1].array[0] == 800 t1.close() def test_new_table_with_ndarray(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray)) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(tbhdu1.data._coldefs.columns[0].array) == id(tbhdu1.data._coldefs._arrays[0])) assert (id(tbhdu1.data._coldefs.columns[0].array) == id(tbhdu1.columns.columns[0].array)) assert (id(tbhdu1.data._coldefs.columns[0].array) == id(tbhdu1.columns._arrays[0])) # Ensure I can change the value of one data element and it effects # all of the others. 
tbhdu1.data[0][1] = 213 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 tbhdu1.data._coldefs._arrays[1][0] = 100 assert tbhdu1.data[0][1] == 100 assert tbhdu1.data._coldefs._arrays[1][0] == 100 assert tbhdu1.data._coldefs.columns[1].array[0] == 100 assert tbhdu1.columns._arrays[1][0] == 100 assert tbhdu1.columns.columns[1].array[0] == 100 tbhdu1.data._coldefs.columns[1].array[0] = 500 assert tbhdu1.data[0][1] == 500 assert tbhdu1.data._coldefs._arrays[1][0] == 500 assert tbhdu1.data._coldefs.columns[1].array[0] == 500 assert tbhdu1.columns._arrays[1][0] == 500 assert tbhdu1.columns.columns[1].array[0] == 500 tbhdu1.columns._arrays[1][0] = 600 assert tbhdu1.data[0][1] == 600 assert tbhdu1.data._coldefs._arrays[1][0] == 600 assert tbhdu1.data._coldefs.columns[1].array[0] == 600 assert tbhdu1.columns._arrays[1][0] == 600 assert tbhdu1.columns.columns[1].array[0] == 600 tbhdu1.columns.columns[1].array[0] = 800 assert tbhdu1.data[0][1] == 800 assert tbhdu1.data._coldefs._arrays[1][0] == 800 assert tbhdu1.data._coldefs.columns[1].array[0] == 800 assert tbhdu1.columns._arrays[1][0] == 800 assert tbhdu1.columns.columns[1].array[0] == 800 tbhdu1.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) t1[1].data[0][1] = 213 assert t1[1].data[0][1] == 213 assert t1[1].data._coldefs._arrays[1][0] == 213 assert t1[1].data._coldefs.columns[1].array[0] == 213 assert t1[1].columns._arrays[1][0] == 213 assert t1[1].columns.columns[1].array[0] == 213 t1[1].data._coldefs._arrays[1][0] = 100 assert t1[1].data[0][1] == 100 assert t1[1].data._coldefs._arrays[1][0] == 100 assert t1[1].data._coldefs.columns[1].array[0] == 100 assert t1[1].columns._arrays[1][0] == 100 assert t1[1].columns.columns[1].array[0] == 100 t1[1].data._coldefs.columns[1].array[0] = 500 assert t1[1].data[0][1] == 500 assert t1[1].data._coldefs._arrays[1][0] == 500 assert t1[1].data._coldefs.columns[1].array[0] == 500 assert t1[1].columns._arrays[1][0] == 500 assert t1[1].columns.columns[1].array[0] == 500 t1[1].columns._arrays[1][0] = 600 assert t1[1].data[0][1] == 600 assert t1[1].data._coldefs._arrays[1][0] == 600 assert t1[1].data._coldefs.columns[1].array[0] == 600 assert t1[1].columns._arrays[1][0] == 600 assert t1[1].columns.columns[1].array[0] == 600 t1[1].columns.columns[1].array[0] = 800 assert t1[1].data[0][1] == 800 assert t1[1].data._coldefs._arrays[1][0] == 800 assert t1[1].data._coldefs.columns[1].array[0] == 800 assert t1[1].columns._arrays[1][0] == 800 assert t1[1].columns.columns[1].array[0] == 800 t1.close() def test_new_table_with_fits_rec(self): counts = np.array([312, 334, 308, 317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.data[0][1] = 213 assert tbhdu.data[0][1] == 213 assert tbhdu.data._coldefs._arrays[1][0] == 213 assert tbhdu.data._coldefs.columns[1].array[0] == 213 assert tbhdu.columns._arrays[1][0] == 213 assert tbhdu.columns.columns[1].array[0] == 213 tbhdu.data._coldefs._arrays[1][0] = 100 assert tbhdu.data[0][1] == 100 assert 
tbhdu.data._coldefs._arrays[1][0] == 100 assert tbhdu.data._coldefs.columns[1].array[0] == 100 assert tbhdu.columns._arrays[1][0] == 100 assert tbhdu.columns.columns[1].array[0] == 100 tbhdu.data._coldefs.columns[1].array[0] = 500 assert tbhdu.data[0][1] == 500 assert tbhdu.data._coldefs._arrays[1][0] == 500 assert tbhdu.data._coldefs.columns[1].array[0] == 500 assert tbhdu.columns._arrays[1][0] == 500 assert tbhdu.columns.columns[1].array[0] == 500 tbhdu.columns._arrays[1][0] = 600 assert tbhdu.data[0][1] == 600 assert tbhdu.data._coldefs._arrays[1][0] == 600 assert tbhdu.data._coldefs.columns[1].array[0] == 600 assert tbhdu.columns._arrays[1][0] == 600 assert tbhdu.columns.columns[1].array[0] == 600 tbhdu.columns.columns[1].array[0] = 800 assert tbhdu.data[0][1] == 800 assert tbhdu.data._coldefs._arrays[1][0] == 800 assert tbhdu.data._coldefs.columns[1].array[0] == 800 assert tbhdu.columns._arrays[1][0] == 800 assert tbhdu.columns.columns[1].array[0] == 800 tbhdu.columns.columns[1].array[0] = 312 tbhdu.writeto(self.temp('table1.fits')) t1 = fits.open(self.temp('table1.fits')) t1[1].data[0][1] = 1 fr = t1[1].data assert t1[1].data[0][1] == 1 assert t1[1].data._coldefs._arrays[1][0] == 1 assert t1[1].data._coldefs.columns[1].array[0] == 1 assert t1[1].columns._arrays[1][0] == 1 assert t1[1].columns.columns[1].array[0] == 1 assert fr[0][1] == 1 assert fr._coldefs._arrays[1][0] == 1 assert fr._coldefs.columns[1].array[0] == 1 fr._coldefs.columns[1].array[0] = 312 tbhdu1 = fits.BinTableHDU.from_columns(fr) i = 0 for row in tbhdu1.data: for j in range(len(row)): if isinstance(row[j], np.ndarray): assert (row[j] == tbhdu.data[i][j]).all() else: assert row[j] == tbhdu.data[i][j] i = i + 1 tbhdu1.data[0][1] = 213 assert t1[1].data[0][1] == 312 assert t1[1].data._coldefs._arrays[1][0] == 312 assert t1[1].data._coldefs.columns[1].array[0] == 312 assert t1[1].columns._arrays[1][0] == 312 assert t1[1].columns.columns[1].array[0] == 312 assert fr[0][1] == 312 assert fr._coldefs._arrays[1][0] == 312 assert fr._coldefs.columns[1].array[0] == 312 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 t1[1].data[0][1] = 10 assert t1[1].data[0][1] == 10 assert t1[1].data._coldefs._arrays[1][0] == 10 assert t1[1].data._coldefs.columns[1].array[0] == 10 assert t1[1].columns._arrays[1][0] == 10 assert t1[1].columns.columns[1].array[0] == 10 assert fr[0][1] == 10 assert fr._coldefs._arrays[1][0] == 10 assert fr._coldefs.columns[1].array[0] == 10 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 tbhdu1.data._coldefs._arrays[1][0] = 666 assert t1[1].data[0][1] == 10 assert t1[1].data._coldefs._arrays[1][0] == 10 assert t1[1].data._coldefs.columns[1].array[0] == 10 assert t1[1].columns._arrays[1][0] == 10 assert t1[1].columns.columns[1].array[0] == 10 assert fr[0][1] == 10 assert fr._coldefs._arrays[1][0] == 10 assert fr._coldefs.columns[1].array[0] == 10 assert tbhdu1.data[0][1] == 666 assert tbhdu1.data._coldefs._arrays[1][0] == 666 assert tbhdu1.data._coldefs.columns[1].array[0] == 666 assert tbhdu1.columns._arrays[1][0] == 666 assert tbhdu1.columns.columns[1].array[0] == 666 t1.close() def test_bin_table_hdu_constructor(self): counts = np.array([312, 334, 308, 
317]) names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4']) c1 = fits.Column(name='target', format='10A', array=names) c2 = fits.Column(name='counts', format='J', unit='DN', array=counts) c3 = fits.Column(name='notes', format='A10') c4 = fits.Column(name='spectrum', format='5E') c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) hdu = fits.BinTableHDU(tbhdu1.data) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.data._coldefs._arrays[0])) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns.columns[0].array)) assert (id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])) # Verify that the references in the original HDU are the same as the # references in the new HDU. assert (id(tbhdu1.data._coldefs.columns[0].array) == id(hdu.data._coldefs._arrays[0])) # Verify that a change in the new HDU is reflected in both the new # and original HDU. hdu.data[0][1] = 213 assert hdu.data[0][1] == 213 assert hdu.data._coldefs._arrays[1][0] == 213 assert hdu.data._coldefs.columns[1].array[0] == 213 assert hdu.columns._arrays[1][0] == 213 assert hdu.columns.columns[1].array[0] == 213 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 hdu.data._coldefs._arrays[1][0] = 100 assert hdu.data[0][1] == 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert tbhdu1.data[0][1] == 100 assert tbhdu1.data._coldefs._arrays[1][0] == 100 assert tbhdu1.data._coldefs.columns[1].array[0] == 100 assert tbhdu1.columns._arrays[1][0] == 100 assert tbhdu1.columns.columns[1].array[0] == 100 hdu.data._coldefs.columns[1].array[0] = 500 assert hdu.data[0][1] == 500 assert hdu.data._coldefs._arrays[1][0] == 500 assert hdu.data._coldefs.columns[1].array[0] == 500 assert hdu.columns._arrays[1][0] == 500 assert hdu.columns.columns[1].array[0] == 500 assert tbhdu1.data[0][1] == 500 assert tbhdu1.data._coldefs._arrays[1][0] == 500 assert tbhdu1.data._coldefs.columns[1].array[0] == 500 assert tbhdu1.columns._arrays[1][0] == 500 assert tbhdu1.columns.columns[1].array[0] == 500 hdu.columns._arrays[1][0] = 600 assert hdu.data[0][1] == 600 assert hdu.data._coldefs._arrays[1][0] == 600 assert hdu.data._coldefs.columns[1].array[0] == 600 assert hdu.columns._arrays[1][0] == 600 assert hdu.columns.columns[1].array[0] == 600 assert tbhdu1.data[0][1] == 600 assert tbhdu1.data._coldefs._arrays[1][0] == 600 assert tbhdu1.data._coldefs.columns[1].array[0] == 600 assert tbhdu1.columns._arrays[1][0] == 600 assert tbhdu1.columns.columns[1].array[0] == 600 hdu.columns.columns[1].array[0] = 800 assert hdu.data[0][1] == 800 assert hdu.data._coldefs._arrays[1][0] == 800 assert hdu.data._coldefs.columns[1].array[0] == 800 assert hdu.columns._arrays[1][0] == 800 assert hdu.columns.columns[1].array[0] == 800 assert tbhdu1.data[0][1] == 800 assert tbhdu1.data._coldefs._arrays[1][0] == 800 assert tbhdu1.data._coldefs.columns[1].array[0] == 800 assert tbhdu1.columns._arrays[1][0] == 800 assert tbhdu1.columns.columns[1].array[0] == 800 def test_constructor_name_arg(self): """testConstructorNameArg Passing name='...' 
to the BinTableHDU and TableHDU constructors should set the .name attribute and 'EXTNAME' header keyword, and override any name in an existing 'EXTNAME' value. """ for hducls in [fits.BinTableHDU, fits.TableHDU]: # First test some default assumptions hdu = hducls() assert hdu.name == '' assert 'EXTNAME' not in hdu.header hdu.name = 'FOO' assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # Passing name to constructor hdu = hducls(name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # And overriding a header with a different extname hdr = fits.Header() hdr['EXTNAME'] = 'EVENTS' hdu = hducls(header=hdr, name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' def test_constructor_ver_arg(self): for hducls in [fits.BinTableHDU, fits.TableHDU]: # First test some default assumptions hdu = hducls() assert hdu.ver == 1 assert 'EXTVER' not in hdu.header hdu.ver = 2 assert hdu.ver == 2 assert hdu.header['EXTVER'] == 2 # Passing name to constructor hdu = hducls(ver=3) assert hdu.ver == 3 assert hdu.header['EXTVER'] == 3 # And overriding a header with a different extver hdr = fits.Header() hdr['EXTVER'] = 4 hdu = hducls(header=hdr, ver=5) assert hdu.ver == 5 assert hdu.header['EXTVER'] == 5 def test_unicode_colname(self): """ Regression test for https://github.com/astropy/astropy/issues/5204 "Handle unicode FITS BinTable column names on Python 2" """ col = fits.Column(name=u'spam', format='E', array=[42.]) # This used to raise a TypeError, now it works fits.BinTableHDU.from_columns([col]) def test_bin_table_with_logical_array(self): c1 = fits.Column(name='flag', format='2L', array=[[True, False], [False, True]]) coldefs = fits.ColDefs([c1]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) assert (tbhdu1.data.field('flag')[0] == np.array([True, False], dtype=bool)).all() assert (tbhdu1.data.field('flag')[1] == np.array([False, True], dtype=bool)).all() tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data) assert (tbhdu.data.field('flag')[0] == np.array([True, False], dtype=bool)).all() assert (tbhdu.data.field('flag')[1] == np.array([False, True], dtype=bool)).all() def test_fits_rec_column_access(self): t = fits.open(self.data('table.fits')) tbdata = t[1].data assert (tbdata.V_mag == tbdata.field('V_mag')).all() assert (tbdata.V_mag == tbdata['V_mag']).all() t.close() def test_table_with_zero_width_column(self): hdul = fits.open(self.data('zerowidth.fits')) tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM' assert 'ORBPARM' in tbhdu.columns.names # The ORBPARM column should not be in the data, though the data should # be readable assert 'ORBPARM' in tbhdu.data.names assert 'ORBPARM' in tbhdu.data.dtype.names # Verify that some of the data columns are still correctly accessible # by name assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16' assert comparefloats( tbhdu.data[0]['STABXYZ'], np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64)) assert tbhdu.data[0]['NOSTA'] == 1 assert tbhdu.data[0]['MNTSTA'] == 0 assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT' assert comparefloats( tbhdu.data[-1]['STABXYZ'], np.array([0.0, 0.0, 0.0], dtype=np.float64)) assert tbhdu.data[-1]['NOSTA'] == 29 assert tbhdu.data[-1]['MNTSTA'] == 0 hdul.writeto(self.temp('newtable.fits')) hdul.close() hdul = fits.open(self.temp('newtable.fits')) tbhdu = hdul[2] # Verify that the previous tests still hold after writing assert 'ORBPARM' in tbhdu.columns.names assert 'ORBPARM' in tbhdu.data.names assert 'ORBPARM' in tbhdu.data.dtype.names assert 
tbhdu.data[0]['ANNAME'] == 'VLA:_W16' assert comparefloats( tbhdu.data[0]['STABXYZ'], np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64)) assert tbhdu.data[0]['NOSTA'] == 1 assert tbhdu.data[0]['MNTSTA'] == 0 assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT' assert comparefloats( tbhdu.data[-1]['STABXYZ'], np.array([0.0, 0.0, 0.0], dtype=np.float64)) assert tbhdu.data[-1]['NOSTA'] == 29 assert tbhdu.data[-1]['MNTSTA'] == 0 hdul.close() def test_string_column_padding(self): a = ['img1', 'img2', 'img3a', 'p'] s = 'img1\x00\x00\x00\x00\x00\x00' \ 'img2\x00\x00\x00\x00\x00\x00' \ 'img3a\x00\x00\x00\x00\x00' \ 'p\x00\x00\x00\x00\x00\x00\x00\x00\x00' acol = fits.Column(name='MEMNAME', format='A10', array=chararray.array(a)) ahdu = fits.BinTableHDU.from_columns([acol]) assert ahdu.data.tostring().decode('raw-unicode-escape') == s ahdu.writeto(self.temp('newtable.fits')) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].data.tostring().decode('raw-unicode-escape') == s assert (hdul[1].data['MEMNAME'] == a).all() del hdul ahdu = fits.TableHDU.from_columns([acol]) with ignore_warnings(): ahdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert (hdul[1].data.tostring().decode('raw-unicode-escape') == s.replace('\x00', ' ')) assert (hdul[1].data['MEMNAME'] == a).all() ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy()) del hdul # Now serialize once more as a binary table; padding bytes should # revert to zeroes ahdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].data.tostring().decode('raw-unicode-escape') == s assert (hdul[1].data['MEMNAME'] == a).all() def test_multi_dimensional_columns(self): """ Tests the multidimensional column implementation with both numeric arrays and string arrays. """ data = np.rec.array( [([0, 1, 2, 3, 4, 5], 'row1' * 2), ([6, 7, 8, 9, 0, 1], 'row2' * 2), ([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8') thdu = fits.BinTableHDU.from_columns(data) # Modify the TDIM fields to my own specification thdu.header['TDIM1'] = '(2,3)' thdu.header['TDIM2'] = '(4,2)' thdu.writeto(self.temp('newtable.fits')) with fits.open(self.temp('newtable.fits')) as hdul: thdu = hdul[1] c1 = thdu.data.field(0) c2 = thdu.data.field(1) assert c1.shape == (3, 3, 2) assert c2.shape == (3, 2) assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [0, 1]], [[2, 3], [4, 5], [6, 7]]])).all() assert (c2 == np.array([['row1', 'row1'], ['row2', 'row2'], ['row3', 'row3']])).all() del c1 del c2 del thdu del hdul # Test setting the TDIMn header based on the column data data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)]) data['x'] = 1, 2, 3 data['s'] = 'ok' with ignore_warnings(): fits.writeto(self.temp('newtable.fits'), data, overwrite=True) t = fits.getdata(self.temp('newtable.fits')) assert t.field(1).dtype.str[-1] == '5' assert t.field(1).shape == (3, 4) # Like the previous test, but with an extra dimension (a bit more # complicated) data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))]) data['x'] = 1, 2, 3 data['s'] = 'ok' del t with ignore_warnings(): fits.writeto(self.temp('newtable.fits'), data, overwrite=True) t = fits.getdata(self.temp('newtable.fits')) assert t.field(1).dtype.str[-1] == '5' assert t.field(1).shape == (3, 4, 3) def test_bin_table_init_from_string_array_column(self): """ Tests two ways of creating a new `BinTableHDU` from a column of string arrays. 
        This tests for a couple different regressions, and ensures that both
        BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
        equivalently.

        Some of this is redundant with the following test, but checks some
        subtly different cases.
        """

        data = [[b'abcd', b'efgh'],
                [b'ijkl', b'mnop'],
                [b'qrst', b'uvwx']]

        arr = np.array([(data,), (data,), (data,), (data,), (data,)],
                       dtype=[('S', '(3, 2)S4')])

        with catch_warnings() as w:
            tbhdu1 = fits.BinTableHDU(data=arr)
            assert len(w) == 0

        def test_dims_and_roundtrip(tbhdu):
            assert tbhdu.data['S'].shape == (5, 3, 2)
            assert tbhdu.data['S'].dtype.str.endswith('U4')

            tbhdu.writeto(self.temp('test.fits'), overwrite=True)

            with fits.open(self.temp('test.fits')) as hdul:
                tbhdu2 = hdul[1]
                assert tbhdu2.header['TDIM1'] == '(4,2,3)'
                assert tbhdu2.data['S'].shape == (5, 3, 2)
                assert tbhdu.data['S'].dtype.str.endswith('U4')
                assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])

        test_dims_and_roundtrip(tbhdu1)

        tbhdu2 = fits.BinTableHDU.from_columns(arr)
        test_dims_and_roundtrip(tbhdu2)

    def test_columns_with_truncating_tdim(self):
        """
        According to the FITS standard (section 7.3.2):

            If the number of elements in the array implied by the TDIMn is
            less than the allocated size of the array in the FITS file, then
            the unused trailing elements should be interpreted as containing
            undefined fill values.

        *deep sigh* What this means is if a column has a repeat count larger
        than the number of elements indicated by its TDIM (ex: TDIM1 =
        '(2,2)', but TFORM1 = 6I), then instead of this being an outright
        error we are to take the first 4 elements as implied by the TDIM and
        ignore the additional two trailing elements.
        """

        # It's hard to even successfully create a table like this.  I think
        # it *should* be difficult, but once created it should at least be
        # possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']] arr2 = [1, 2, 3, 4, 5] arr = np.array([(arr1, arr2), (arr1, arr2)], dtype=[('a', '(3, 2)S2'), ('b', '5i8')]) tbhdu = fits.BinTableHDU(data=arr) tbhdu.writeto(self.temp('test.fits')) with open(self.temp('test.fits'), 'rb') as f: raw_bytes = f.read() # Artificially truncate TDIM in the header; this seems to be the # easiest way to do this while getting around Astropy's insistence on the # data and header matching perfectly; again, we have no interest in # making it possible to write files in this format, only read them with open(self.temp('test.fits'), 'wb') as f: f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)')) with fits.open(self.temp('test.fits')) as hdul: tbhdu2 = hdul[1] assert tbhdu2.header['TDIM1'] == '(2,2,2)' assert tbhdu2.header['TFORM1'] == '12A' for row in tbhdu2.data: assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']]) assert np.all(row['b'] == [1, 2, 3, 4, 5]) def test_string_array_round_trip(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201""" data = [['abc', 'def', 'ghi'], ['jkl', 'mno', 'pqr'], ['stu', 'vwx', 'yz ']] recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3']) t = fits.BinTableHDU(data=recarr) t.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert 'TDIM1' in h[1].header assert h[1].header['TDIM1'] == '(3,3,3)' assert len(h[1].data) == 2 assert len(h[1].data[0]) == 1 assert (h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], 'ascii')).all() with fits.open(self.temp('test.fits')) as h: # Access the data; I think this is necessary to exhibit the bug # reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201 h[1].data[:] h.writeto(self.temp('test2.fits')) with fits.open(self.temp('test2.fits')) as h: assert 'TDIM1' in h[1].header assert h[1].header['TDIM1'] == '(3,3,3)' assert len(h[1].data) == 2 assert len(h[1].data[0]) == 1 assert (h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], 'ascii')).all() def test_new_table_with_nd_column(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/3 """ arra = np.array(['a', 'b'], dtype='|S1') arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2') arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) cols = [ fits.Column(name='str', format='1A', array=arra), fits.Column(name='strarray', format='4A', dim='(2,2)', array=arrb), fits.Column(name='intarray', format='4I', dim='(2, 2)', array=arrc) ] hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols)) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: # Need to force string arrays to byte arrays in order to compare # correctly on Python 3 assert (h[1].data['str'].encode('ascii') == arra).all() assert (h[1].data['strarray'].encode('ascii') == arrb).all() assert (h[1].data['intarray'] == arrc).all() def test_mismatched_tform_and_tdim(self): """Normally the product of the dimensions listed in a TDIMn keyword must be less than or equal to the repeat count in the TFORMn keyword. This tests that this works if less than (treating the trailing bytes as unspecified fill values per the FITS standard) and fails if the dimensions specified by TDIMn are greater than the repeat count. 
""" arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]) cols = [fits.Column(name='a', format='20I', dim='(2,2)', array=arra), fits.Column(name='b', format='4I', dim='(2,2)', array=arrb)] # The first column has the mismatched repeat count hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols)) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert h[1].header['TFORM1'] == '20I' assert h[1].header['TFORM2'] == '4I' assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)' assert (h[1].data['a'] == arra).all() assert (h[1].data['b'] == arrb).all() assert h[1].data.itemsize == 48 # 16-bits times 24 # If dims is more than the repeat count in the format specifier raise # an error pytest.raises(VerifyError, fits.Column, name='a', format='2I', dim='(2,2)', array=arra) def test_tdim_of_size_one(self): """Regression test for https://github.com/astropy/astropy/pull/3580""" hdulist = fits.open(self.data('tdim.fits')) assert hdulist[1].data['V_mag'].shape == (3, 1, 1) def test_slicing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52""" f = fits.open(self.data('table.fits')) data = f[1].data targets = data.field('target') s = data[:] assert (s.field('target') == targets).all() for n in range(len(targets) + 2): s = data[:n] assert (s.field('target') == targets[:n]).all() s = data[n:] assert (s.field('target') == targets[n:]).all() s = data[::2] assert (s.field('target') == targets[::2]).all() s = data[::-1] assert (s.field('target') == targets[::-1]).all() def test_array_slicing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55""" f = fits.open(self.data('table.fits')) data = f[1].data s1 = data[data['target'] == 'NGC1001'] s2 = data[np.where(data['target'] == 'NGC1001')] s3 = data[[0]] s4 = data[:1] for s in [s1, s2, s3, s4]: assert isinstance(s, fits.FITS_rec) assert comparerecords(s1, s2) assert comparerecords(s2, s3) assert comparerecords(s3, s4) def test_array_broadcasting(self): """ Regression test for https://github.com/spacetelescope/PyFITS/pull/48 """ with fits.open(self.data('table.fits')) as hdu: data = hdu[1].data data['V_mag'] = 0 assert np.all(data['V_mag'] == 0) data['V_mag'] = 1 assert np.all(data['V_mag'] == 1) for container in (list, tuple, np.array): data['V_mag'] = container([1, 2, 3]) assert np.array_equal(data['V_mag'], np.array([1, 2, 3])) def test_array_slicing_readonly(self): """ Like test_array_slicing but with the file opened in 'readonly' mode. Regression test for a crash when slicing readonly memmap'd tables. """ f = fits.open(self.data('table.fits'), mode='readonly') data = f[1].data s1 = data[data['target'] == 'NGC1001'] s2 = data[np.where(data['target'] == 'NGC1001')] s3 = data[[0]] s4 = data[:1] for s in [s1, s2, s3, s4]: assert isinstance(s, fits.FITS_rec) assert comparerecords(s1, s2) assert comparerecords(s2, s3) assert comparerecords(s3, s4) def test_dump_load_round_trip(self): """ A simple test of the dump/load methods; dump the data, column, and header files and try to reload the table from them. 
""" hdul = fits.open(self.data('table.fits')) tbhdu = hdul[1] datafile = self.temp('data.txt') cdfile = self.temp('coldefs.txt') hfile = self.temp('header.txt') tbhdu.dump(datafile, cdfile, hfile) new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile) assert comparerecords(tbhdu.data, new_tbhdu.data) # Double check that the headers are equivalent assert str(tbhdu.header) == str(new_tbhdu.header) def test_dump_load_array_colums(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/22 Ensures that a table containing a multi-value array column can be dumped and loaded successfully. """ data = np.rec.array([('a', [1, 2, 3, 4], 0.1), ('b', [5, 6, 7, 8], 0.2)], formats='a1,4i4,f8') tbhdu = fits.BinTableHDU.from_columns(data) datafile = self.temp('data.txt') cdfile = self.temp('coldefs.txt') hfile = self.temp('header.txt') tbhdu.dump(datafile, cdfile, hfile) new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile) assert comparerecords(tbhdu.data, new_tbhdu.data) assert str(tbhdu.header) == str(new_tbhdu.header) def test_load_guess_format(self): """ Tests loading a table dump with no supplied coldefs or header, so that the table format has to be guessed at. There is of course no exact science to this; the table that's produced simply uses sensible guesses for that format. Ideally this should never have to be used. """ # Create a table containing a variety of data types. a0 = np.array([False, True, False], dtype=bool) c0 = fits.Column(name='c0', format='L', array=a0) # Format X currently not supported by the format # a1 = np.array([[0], [1], [0]], dtype=np.uint8) # c1 = fits.Column(name='c1', format='X', array=a1) a2 = np.array([1, 128, 255], dtype=np.uint8) c2 = fits.Column(name='c2', format='B', array=a2) a3 = np.array([-30000, 1, 256], dtype=np.int16) c3 = fits.Column(name='c3', format='I', array=a3) a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32) c4 = fits.Column(name='c4', format='J', array=a4) a5 = np.array(['a', 'abc', 'ab']) c5 = fits.Column(name='c5', format='A3', array=a5) a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64) c6 = fits.Column(name='c6', format='D', array=a6) a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128) c7 = fits.Column(name='c7', format='M', array=a7) a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32) c8 = fits.Column(name='c8', format='PJ()', array=a8) tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8]) datafile = self.temp('data.txt') tbhdu.dump(datafile) new_tbhdu = fits.BinTableHDU.load(datafile) # In this particular case the record data at least should be equivalent assert comparerecords(tbhdu.data, new_tbhdu.data) def test_attribute_field_shadowing(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86 Numpy recarray objects have a poorly-considered feature of allowing field access by attribute lookup. However, if a field name conincides with an existing attribute/method of the array, the existing name takes precence (making the attribute-based field lookup completely unreliable in general cases). This ensures that any FITS_rec attributes still work correctly even when there is a field with the same name as that attribute. 
""" c1 = fits.Column(name='names', format='I', array=[1]) c2 = fits.Column(name='formats', format='I', array=[2]) c3 = fits.Column(name='other', format='I', array=[3]) t = fits.BinTableHDU.from_columns([c1, c2, c3]) assert t.data.names == ['names', 'formats', 'other'] assert t.data.formats == ['I'] * 3 assert (t.data['names'] == [1]).all() assert (t.data['formats'] == [2]).all() assert (t.data.other == [3]).all() def test_table_from_bool_fields(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113 Tests creating a table from a recarray containing numpy.bool columns. """ array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1') thdu = fits.BinTableHDU.from_columns(array) assert thdu.columns.formats == ['L', 'L'] assert comparerecords(thdu.data, array) # Test round trip thdu.writeto(self.temp('table.fits')) data = fits.getdata(self.temp('table.fits'), ext=1) assert thdu.columns.formats == ['L', 'L'] assert comparerecords(data, array) def test_table_from_bool_fields2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215 Tests the case where a multi-field ndarray (not a recarray) containing a bool field is used to initialize a `BinTableHDU`. """ arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')]) hdu = fits.BinTableHDU(data=arr) assert (hdu.data['a'] == arr['a']).all() def test_bool_column_update(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139""" c1 = fits.Column('F1', 'L', array=[True, False]) c2 = fits.Column('F2', 'L', array=[False, True]) thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2])) thdu.writeto(self.temp('table.fits')) with fits.open(self.temp('table.fits'), mode='update') as hdul: hdul[1].data['F1'][1] = True hdul[1].data['F2'][0] = True with fits.open(self.temp('table.fits')) as hdul: assert (hdul[1].data['F1'] == [True, True]).all() assert (hdul[1].data['F2'] == [True, True]).all() def test_missing_tnull(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197""" c = fits.Column('F1', 'A3', null='---', array=np.array(['1.0', '2.0', '---', '3.0']), ascii=True) table = fits.TableHDU.from_columns([c]) table.writeto(self.temp('test.fits')) # Now let's delete the TNULL1 keyword, making this essentially # unreadable with fits.open(self.temp('test.fits'), mode='update') as h: h[1].header['TFORM1'] = 'E3' del h[1].header['TNULL1'] with fits.open(self.temp('test.fits')) as h: pytest.raises(ValueError, lambda: h[1].data['F1']) try: with fits.open(self.temp('test.fits')) as h: h[1].data['F1'] except ValueError as e: assert str(e).endswith( "the header may be missing the necessary TNULL1 " "keyword or the table contains invalid data") def test_blank_field_zero(self): """Regression test for https://github.com/astropy/astropy/issues/5134 Blank values in numerical columns of ASCII tables should be replaced with zeros, so they can be loaded into numpy arrays. When a TNULL value is set and there are blank fields not equal to that value, they should be replaced with zeros. """ # Test an integer column with blank string as null nullval1 = u' ' c1 = fits.Column('F1', format='I8', null=nullval1, array=np.array([0, 1, 2, 3, 4]), ascii=True) table = fits.TableHDU.from_columns([c1]) table.writeto(self.temp('ascii_null.fits')) # Replace the 1st col, 3rd row, with a null field. 
        with open(self.temp('ascii_null.fits'), mode='r+') as h:
            nulled = h.read().replace(u'2 ', u' ')
            h.seek(0)
            h.write(nulled)

        with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
            assert f[1].data[2][0] == 0

        # Test a float column with a null value set and blank fields.
        nullval2 = 'NaN'
        c2 = fits.Column('F1', format='F12.8', null=nullval2,
                         array=np.array([1.0, 2.0, 3.0, 4.0]),
                         ascii=True)
        table = fits.TableHDU.from_columns([c2])
        table.writeto(self.temp('ascii_null2.fits'))

        # Replace the 1st col, 3rd row, with a null field.
        with open(self.temp('ascii_null2.fits'), mode='r+') as h:
            nulled = h.read().replace(u'3.00000000', u' ')
            h.seek(0)
            h.write(nulled)

        with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
            # (Currently it should evaluate to 0.0, but if a TODO in fitsrec
            # is completed, then it should evaluate to NaN.)
            assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])

    def test_column_array_type_mismatch(self):
        """Regression test for
        https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""

        arr = [-99] * 20
        col = fits.Column('mag', format='E', array=arr)
        assert (arr == col.array).all()

    def test_table_none(self):
        """Regression test for
        https://github.com/spacetelescope/PyFITS/issues/27
        """

        with fits.open(self.data('tb.fits')) as h:
            h[1].data
            h[1].data = None
            assert isinstance(h[1].data, fits.FITS_rec)
            assert len(h[1].data) == 0
            h[1].writeto(self.temp('test.fits'))

        with fits.open(self.temp('test.fits')) as h:
            assert h[1].header['NAXIS'] == 2
            assert h[1].header['NAXIS1'] == 12
            assert h[1].header['NAXIS2'] == 0
            assert isinstance(h[1].data, fits.FITS_rec)
            assert len(h[1].data) == 0

    def test_unncessary_table_load(self):
        """Test unnecessary parsing and processing of FITS tables when
        writing directly from one FITS file to a new file without first
        reading the data for user manipulation.

        In other words, it should be possible to do a direct copy of the
        raw data without unnecessary processing of the data.
        """

        with fits.open(self.data('table.fits')) as h:
            h[1].writeto(self.temp('test.fits'))

        # Since this was a direct copy the h[1].data attribute should not
        # have even been accessed (since this means the data was read and
        # parsed)
        assert 'data' not in h[1].__dict__

        with fits.open(self.data('table.fits')) as h1:
            with fits.open(self.temp('test.fits')) as h2:
                assert str(h1[1].header) == str(h2[1].header)
                assert comparerecords(h1[1].data, h2[1].data)

    def test_table_from_columns_of_other_table(self):
        """Tests a rare corner case where the columns of an existing table
        are used to create a new table with the new_table function.  In this
        specific case, however, the existing table's data has not been read
        yet, so new_table has to get at it through the Delayed proxy.

        Note: Although this previously tested new_table it now uses
        BinTableHDU.from_columns directly, around which new_table is a mere
        wrapper.
        """

        hdul = fits.open(self.data('table.fits'))

        # Make sure the column array is in fact delayed...
        assert isinstance(hdul[1].columns._arrays[0], Delayed)

        # Create a new table...
        t = fits.BinTableHDU.from_columns(hdul[1].columns)

        # The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed) t.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul2: assert comparerecords(hdul[1].data, hdul2[1].data) def test_bintable_to_asciitable(self): """Tests initializing a TableHDU with the data from a BinTableHDU.""" with fits.open(self.data('tb.fits')) as hdul: tbdata = hdul[1].data tbhdu = fits.TableHDU(data=tbdata) with ignore_warnings(): tbhdu.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as hdul2: tbdata2 = hdul2[1].data assert np.all(tbdata['c1'] == tbdata2['c1']) assert np.all(tbdata['c2'] == tbdata2['c2']) # c3 gets converted from float32 to float64 when writing # test.fits, so cast to float32 before testing that the correct # value is retrieved assert np.all(tbdata['c3'].astype(np.float32) == tbdata2['c3'].astype(np.float32)) # c4 is a boolean column in the original table; we want ASCII # columns to convert these to columns of 'T'/'F' strings assert np.all(np.where(tbdata['c4'], 'T', 'F') == tbdata2['c4']) def test_pickle(self): """ Regression test for https://github.com/astropy/astropy/issues/1597 Tests for pickling FITS_rec objects """ # open existing FITS tables (images pickle by default, no test needed): with fits.open(self.data('tb.fits')) as btb: # Test column array is delayed and can pickle assert isinstance(btb[1].columns._arrays[0], Delayed) btb_pd = pickle.dumps(btb[1].data) btb_pl = pickle.loads(btb_pd) # It should not be delayed any more assert not isinstance(btb[1].columns._arrays[0], Delayed) assert comparerecords(btb_pl, btb[1].data) with fits.open(self.data('ascii.fits')) as asc: asc_pd = pickle.dumps(asc[1].data) asc_pl = pickle.loads(asc_pd) assert comparerecords(asc_pl, asc[1].data) with fits.open(self.data('random_groups.fits')) as rgr: rgr_pd = pickle.dumps(rgr[0].data) rgr_pl = pickle.loads(rgr_pd) assert comparerecords(rgr_pl, rgr[0].data) with fits.open(self.data('zerowidth.fits')) as zwc: # Doesn't pickle zero-width (_phanotm) column 'ORBPARM' with ignore_warnings(): zwc_pd = pickle.dumps(zwc[2].data) zwc_pl = pickle.loads(zwc_pd) assert comparerecords(zwc_pl, zwc[2].data) def test_zero_length_table(self): array = np.array([], dtype=[ ('a', 'i8'), ('b', 'S64'), ('c', ('i4', (3, 2)))]) hdu = fits.BinTableHDU(array) assert hdu.header['NAXIS1'] == 96 assert hdu.header['NAXIS2'] == 0 assert hdu.header['TDIM3'] == '(2,3)' field = hdu.data.field(1) assert field.shape == (0,) def test_dim_column_byte_order_mismatch(self): """ When creating a table column with non-trivial TDIMn, and big-endian array data read from an existing FITS file, the data should not be unnecessarily byteswapped. Regression test for https://github.com/astropy/astropy/issues/3561 """ data = fits.getdata(self.data('random_groups.fits'))['DATA'] col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)', format='1152E') thdu = fits.BinTableHDU.from_columns([col]) thdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert np.all(hdul[1].data['TEST'] == data) def test_fits_rec_from_existing(self): """ Tests creating a `FITS_rec` object with `FITS_rec.from_columns` from an existing `FITS_rec` object read from a FITS file. This ensures that the per-column arrays are updated properly. 
Regression test for https://github.com/spacetelescope/PyFITS/issues/99 """ # The use case that revealed this problem was trying to create a new # table from an existing table, but with additional rows so that we can # append data from a second table (with the same column structure) data1 = fits.getdata(self.data('tb.fits')) data2 = fits.getdata(self.data('tb.fits')) nrows = len(data1) + len(data2) merged = fits.FITS_rec.from_columns(data1, nrows=nrows) merged[len(data1):] = data2 mask = merged['c1'] > 1 masked = merged[mask] # The test table only has two rows, only the second of which is > 1 for # the 'c1' column assert comparerecords(data1[1:], masked[:1]) assert comparerecords(data1[1:], masked[1:]) # Double check that the original data1 table hasn't been affected by # its use in creating the "merged" table assert comparerecords(data1, fits.getdata(self.data('tb.fits'))) def test_update_string_column_inplace(self): """ Regression test for https://github.com/astropy/astropy/issues/4452 Ensure that changes to values in a string column are saved when a file is opened in ``mode='update'``. """ data = np.array([('abc',)], dtype=[('a', 'S3')]) fits.writeto(self.temp('test.fits'), data) with fits.open(self.temp('test.fits'), mode='update') as hdul: hdul[1].data['a'][0] = 'XYZ' assert hdul[1].data['a'][0] == 'XYZ' with fits.open(self.temp('test.fits')) as hdul: assert hdul[1].data['a'][0] == 'XYZ' # Test update but with a non-trivial TDIMn data = np.array([([['abc', 'def', 'geh'], ['ijk', 'lmn', 'opq']],)], dtype=[('a', ('S3', (2, 3)))]) fits.writeto(self.temp('test2.fits'), data) expected = [['abc', 'def', 'geh'], ['ijk', 'XYZ', 'opq']] with fits.open(self.temp('test2.fits'), mode='update') as hdul: assert hdul[1].header['TDIM1'] == '(3,3,2)' # Note: Previously I wrote data['a'][0][1, 1] to address # the single row. However, this is broken for chararray because # data['a'][0] does *not* return a view of the original array--this # is a bug in chararray though and not a bug in any FITS-specific # code so we'll roll with it for now... # (by the way the bug in question is fixed in newer Numpy versions) hdul[1].data['a'][0, 1, 1] = 'XYZ' assert np.all(hdul[1].data['a'][0] == expected) with fits.open(self.temp('test2.fits')) as hdul: assert hdul[1].header['TDIM1'] == '(3,3,2)' assert np.all(hdul[1].data['a'][0] == expected) @pytest.mark.skipif(str('not HAVE_OBJGRAPH')) def test_reference_leak(self): """Regression test for https://github.com/astropy/astropy/pull/520""" def readfile(filename): with fits.open(filename) as hdul: data = hdul[1].data.copy() for colname in data.dtype.names: data[colname] with _refcounting('FITS_rec'): readfile(self.data('memtest.fits')) @pytest.mark.skipif(str('not HAVE_OBJGRAPH')) def test_reference_leak2(self, tmpdir): """ Regression test for https://github.com/astropy/astropy/pull/4539 This actually re-runs a small set of tests that I found, during careful testing, exhibited the reference leaks fixed by #4539, but now with reference counting around each test to ensure that the leaks are fixed. 
""" from .test_core import TestCore from .test_connect import TestMultipleHDU t1 = TestCore() t1.setup() try: with _refcounting('FITS_rec'): t1.test_add_del_columns2() finally: t1.teardown() del t1 t2 = self.__class__() for test_name in ['test_recarray_to_bintablehdu', 'test_numpy_ndarray_to_bintablehdu', 'test_new_table_from_recarray', 'test_new_fitsrec']: t2.setup() try: with _refcounting('FITS_rec'): getattr(t2, test_name)() finally: t2.teardown() del t2 t3 = TestMultipleHDU() t3.setup_class() try: with _refcounting('FITS_rec'): t3.test_read(tmpdir) finally: t3.teardown_class() del t3 def test_dump_clobber_vs_overwrite(self): with fits.open(self.data('table.fits')) as hdul: tbhdu = hdul[1] datafile = self.temp('data.txt') cdfile = self.temp('coldefs.txt') hfile = self.temp('header.txt') tbhdu.dump(datafile, cdfile, hfile) tbhdu.dump(datafile, cdfile, hfile, overwrite=True) with catch_warnings(AstropyDeprecationWarning) as warning_lines: tbhdu.dump(datafile, cdfile, hfile, clobber=True) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"clobber" was ' 'deprecated in version 2.0 and will be removed in a ' 'future version. Use argument "overwrite" instead.') def test_pseudo_unsigned_ints(self): """ Tests updating a table column containing pseudo-unsigned ints. """ data = np.array([1, 2, 3], dtype=np.uint32) col = fits.Column(name='A', format='1J', bzero=2**31, array=data) thdu = fits.BinTableHDU.from_columns([col]) thdu.writeto(self.temp('test.fits')) # Test that the file wrote out correctly with fits.open(self.temp('test.fits'), uint=True) as hdul: hdu = hdul[1] assert 'TZERO1' in hdu.header assert hdu.header['TZERO1'] == 2**31 assert hdu.data['A'].dtype == np.dtype('uint32') assert np.all(hdu.data['A'] == data) # Test updating the unsigned int data hdu.data['A'][0] = 99 hdu.writeto(self.temp('test2.fits')) with fits.open(self.temp('test2.fits'), uint=True) as hdul: hdu = hdul[1] assert 'TZERO1' in hdu.header assert hdu.header['TZERO1'] == 2**31 assert hdu.data['A'].dtype == np.dtype('uint32') assert np.all(hdu.data['A'] == [99, 2, 3]) def test_column_with_scaling(self): """Check that a scaled column if correctly saved once it is modified. Regression test for https://github.com/astropy/astropy/issues/6887 """ c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'), format='1I', bscale=1, bzero=32768) S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])]) # Change value in memory S[1].data['c1'][0] = 2 S.writeto(self.temp("a.fits")) assert S[1].data['c1'] == 2 # Read and change value in memory X = fits.open(self.temp("a.fits")) X[1].data['c1'][0] = 10 assert X[1].data['c1'][0] == 10 # Write back to file X.writeto(self.temp("b.fits")) # Now check the file with fits.open(self.temp("b.fits")) as hdul: assert hdul[1].data['c1'][0] == 10 @contextlib.contextmanager def _refcounting(type_): """ Perform the body of a with statement with reference counting for the given type (given by class name)--raises an assertion error if there are more unfreed objects of the given type than when we entered the with statement. """ gc.collect() refcount = len(objgraph.by_type(type_)) yield refcount gc.collect() assert len(objgraph.by_type(type_)) <= refcount, \ "More {0!r} objects still in memory than before." 
class TestVLATables(FitsTestCase): """Tests specific to tables containing variable-length arrays.""" def test_variable_length_columns(self): def test(format_code): col = fits.Column(name='QUAL_SPE', format=format_code, array=[[0] * 1571] * 225) tb_hdu = fits.BinTableHDU.from_columns([col]) pri_hdu = fits.PrimaryHDU() hdu_list = fits.HDUList([pri_hdu, tb_hdu]) with ignore_warnings(): hdu_list.writeto(self.temp('toto.fits'), overwrite=True) with fits.open(self.temp('toto.fits')) as toto: q = toto[1].data.field('QUAL_SPE') assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all() assert toto[1].columns[0].format.endswith('J(1571)') for code in ('PJ()', 'QJ()'): test(code) def test_extend_variable_length_array(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54""" def test(format_code): arr = [[1] * 10] * 10 col1 = fits.Column(name='TESTVLF', format=format_code, array=arr) col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10) tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15) # This asserts that the normal 'scalar' column's length was extended assert len(tb_hdu.data['TESTSCA']) == 15 # And this asserts that the VLF column was extended in the same manner assert len(tb_hdu.data['TESTVLF']) == 15 # We can't compare the whole array since the _VLF is an array of # objects, but comparing just the edge case rows should suffice assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all() assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all() assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all() assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all() for code in ('PJ()', 'QJ()'): test(code) def test_variable_length_table_format_pd_from_object_array(self): def test(format_code): a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], 'O') acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) with ignore_warnings(): tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as tbhdu1: assert tbhdu1[1].columns[0].format.endswith('D(2)') for j in range(3): for i in range(len(a[j])): assert tbhdu1[1].data.field(0)[j][i] == a[j][i] for code in ('PD()', 'QD()'): test(code) def test_variable_length_table_format_pd_from_list(self): def test(format_code): a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])] acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) with ignore_warnings(): tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as tbhdu1: assert tbhdu1[1].columns[0].format.endswith('D(2)') for j in range(3): for i in range(len(a[j])): assert tbhdu1[1].data.field(0)[j][i] == a[j][i] for code in ('PD()', 'QD()'): test(code) def test_variable_length_table_format_pa_from_object_array(self): def test(format_code): a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']), np.array(['f'])], 'O') acol = fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) with ignore_warnings(): tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].columns[0].format.endswith('A(3)') for j in range(3): for i in range(len(a[j])): assert hdul[1].data.field(0)[j][i] == a[j][i] for code in ('PA()', 'QA()'): test(code) def test_variable_length_table_format_pa_from_list(self): def test(format_code): a = ['a', 'ab', 'abc'] acol = 
fits.Column(name='testa', format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) with ignore_warnings(): tbhdu.writeto(self.temp('newtable.fits'), overwrite=True) with fits.open(self.temp('newtable.fits')) as hdul: assert hdul[1].columns[0].format.endswith('A(3)') for j in range(3): for i in range(len(a[j])): assert hdul[1].data.field(0)[j][i] == a[j][i] for code in ('PA()', 'QA()'): test(code) def test_getdata_vla(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200""" def test(format_code): col = fits.Column(name='QUAL_SPE', format=format_code, array=[np.arange(1572)] * 225) tb_hdu = fits.BinTableHDU.from_columns([col]) pri_hdu = fits.PrimaryHDU() hdu_list = fits.HDUList([pri_hdu, tb_hdu]) with ignore_warnings(): hdu_list.writeto(self.temp('toto.fits'), overwrite=True) data = fits.getdata(self.temp('toto.fits')) # Need to compare to the original data row by row since the FITS_rec # returns an array of _VLA objects for row_a, row_b in zip(data['QUAL_SPE'], col.array): assert (row_a == row_b).all() for code in ('PJ()', 'QJ()'): test(code) def test_copy_vla(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/47 """ # Make a file containing a couple of VLA tables arr1 = [np.arange(n + 1) for n in range(255)] arr2 = [np.arange(255, 256 + n) for n in range(255)] # A dummy non-VLA column needed to reproduce issue #47 c = fits.Column('test', format='J', array=np.arange(255)) c1 = fits.Column('A', format='PJ', array=arr1) c2 = fits.Column('B', format='PJ', array=arr2) t1 = fits.BinTableHDU.from_columns([c, c1]) t2 = fits.BinTableHDU.from_columns([c, c2]) hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2]) hdul.writeto(self.temp('test.fits'), overwrite=True) # Just test that the test file wrote out correctly with fits.open(self.temp('test.fits')) as h: assert h[1].header['TFORM2'] == 'PJ(255)' assert h[2].header['TFORM2'] == 'PJ(255)' assert comparerecords(h[1].data, t1.data) assert comparerecords(h[2].data, t2.data) # Try copying the second VLA and writing to a new file with fits.open(self.temp('test.fits')) as h: new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header) new_hdu.writeto(self.temp('test3.fits')) with fits.open(self.temp('test3.fits')) as h2: assert comparerecords(h2[1].data, t2.data) new_hdul = fits.HDUList([fits.PrimaryHDU()]) new_hdul.writeto(self.temp('test2.fits')) # Open several copies of the test file and append copies of the second # VLA table with fits.open(self.temp('test2.fits'), mode='append') as new_hdul: for _ in range(2): with fits.open(self.temp('test.fits')) as h: new_hdul.append(h[2]) new_hdul.flush() # Test that all the VLA copies wrote correctly with fits.open(self.temp('test2.fits')) as new_hdul: for idx in range(1, 3): assert comparerecords(new_hdul[idx].data, t2.data) # These are tests that solely test the Column and ColDefs interfaces and # related functionality without directly involving full tables; currently there # are few of these but I expect there to be more as I improve the test coverage class TestColumnFunctions(FitsTestCase): def test_column_format_interpretation(self): """ Test to ensure that when Numpy-style record formats are passed in to the Column constructor for the format argument, they are recognized so long as it's unambiguous (where "unambiguous" here is questionable since Numpy is case insensitive when parsing the format codes. But their "proper" case is lower-case, so we can accept that. Basically, actually, any key in the NUMPY2FITS dict should be accepted. 
""" for recformat, fitsformat in NUMPY2FITS.items(): c = fits.Column('TEST', np.dtype(recformat)) c.format == fitsformat c = fits.Column('TEST', recformat) c.format == fitsformat c = fits.Column('TEST', fitsformat) c.format == fitsformat # Test a few cases that are ambiguous in that they *are* valid binary # table formats though not ones that are likely to be used, but are # also valid common ASCII table formats c = fits.Column('TEST', 'I4') assert c.format == 'I4' assert c.format.format == 'I' assert c.format.width == 4 c = fits.Column('TEST', 'F15.8') assert c.format == 'F15.8' assert c.format.format == 'F' assert c.format.width == 15 assert c.format.precision == 8 c = fits.Column('TEST', 'E15.8') assert c.format.format == 'E' assert c.format.width == 15 assert c.format.precision == 8 c = fits.Column('TEST', 'D15.8') assert c.format.format == 'D' assert c.format.width == 15 assert c.format.precision == 8 # zero-precision should be allowed as well, for float types # https://github.com/astropy/astropy/issues/3422 c = fits.Column('TEST', 'F10.0') assert c.format.format == 'F' assert c.format.width == 10 assert c.format.precision == 0 c = fits.Column('TEST', 'E10.0') assert c.format.format == 'E' assert c.format.width == 10 assert c.format.precision == 0 c = fits.Column('TEST', 'D10.0') assert c.format.format == 'D' assert c.format.width == 10 assert c.format.precision == 0 # These are a couple cases where the format code is a valid binary # table format, and is not strictly a valid ASCII table format but # could be *interpreted* as one by appending a default width. This # will only happen either when creating an ASCII table or when # explicitly specifying ascii=True when the column is created c = fits.Column('TEST', 'I') assert c.format == 'I' assert c.format.recformat == 'i2' c = fits.Column('TEST', 'I', ascii=True) assert c.format == 'I10' c = fits.Column('TEST', 'E') assert c.format == 'E' assert c.format.recformat == 'f4' c = fits.Column('TEST', 'E', ascii=True) assert c.format == 'E15.7' # F is not a valid binary table format so it should be unambiguously # treated as an ASCII column c = fits.Column('TEST', 'F') assert c.format == 'F16.7' c = fits.Column('TEST', 'D') assert c.format == 'D' assert c.format.recformat == 'f8' c = fits.Column('TEST', 'D', ascii=True) assert c.format == 'D25.17' def test_zero_precision_float_column(self): """ Regression test for https://github.com/astropy/astropy/issues/3422 """ c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3]) # The decimal places will be clipped t = fits.TableHDU.from_columns([c]) t.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[1].header['TFORM1'] == 'F5.0' assert hdul[1].data['TEST'].dtype == np.dtype('float64') assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0]) # Check how the raw data looks raw = np.rec.recarray.field(hdul[1].data, 'TEST') assert raw.tostring() == b' 1. 2. 3.' def test_column_array_type_mismatch(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218""" arr = [-99] * 20 col = fits.Column('mag', format='E', array=arr) assert (arr == col.array).all() def test_new_coldefs_with_invalid_seqence(self): """Test that a TypeError is raised when a ColDefs is instantiated with a sequence of non-Column objects. 
""" pytest.raises(TypeError, fits.ColDefs, [1, 2, 3]) def test_pickle(self): """ Regression test for https://github.com/astropy/astropy/issues/1597 Tests for pickling FITS_rec objects """ # open existing FITS tables (images pickle by default, no test needed): with fits.open(self.data('tb.fits')) as btb: # Test column array is delayed and can pickle assert isinstance(btb[1].columns._arrays[0], Delayed) btb_pd = pickle.dumps(btb[1].data) btb_pl = pickle.loads(btb_pd) # It should not be delayed any more assert not isinstance(btb[1].columns._arrays[0], Delayed) assert comparerecords(btb_pl, btb[1].data) with fits.open(self.data('ascii.fits')) as asc: asc_pd = pickle.dumps(asc[1].data) asc_pl = pickle.loads(asc_pd) assert comparerecords(asc_pl, asc[1].data) with fits.open(self.data('random_groups.fits')) as rgr: rgr_pd = pickle.dumps(rgr[0].data) rgr_pl = pickle.loads(rgr_pd) assert comparerecords(rgr_pl, rgr[0].data) with fits.open(self.data('zerowidth.fits')) as zwc: # Doesn't pickle zero-width (_phanotm) column 'ORBPARM' zwc_pd = pickle.dumps(zwc[2].data) zwc_pl = pickle.loads(zwc_pd) assert comparerecords(zwc_pl, zwc[2].data) def test_column_lookup_by_name(self): """Tests that a `ColDefs` can be indexed by column name.""" a = fits.Column(name='a', format='D') b = fits.Column(name='b', format='D') cols = fits.ColDefs([a, b]) assert cols['a'] == cols[0] assert cols['b'] == cols[1] def test_column_attribute_change_after_removal(self): """ This is a test of the column attribute change notification system. After a column has been removed from a table (but other references are kept to that same column) changes to that column's attributes should not trigger a notification on the table it was removed from. """ # One way we can check this is to ensure there are no further changes # to the header table = fits.BinTableHDU.from_columns([ fits.Column('a', format='D'), fits.Column('b', format='D')]) b = table.columns['b'] table.columns.del_col('b') assert table.data.dtype.names == ('a',) b.name = 'HELLO' assert b.name == 'HELLO' assert 'TTYPE2' not in table.header assert table.header['TTYPE1'] == 'a' assert table.columns.names == ['a'] with pytest.raises(KeyError): table.columns['b'] # Make sure updates to the remaining column still work table.columns.change_name('a', 'GOODBYE') with pytest.raises(KeyError): table.columns['a'] assert table.columns['GOODBYE'].name == 'GOODBYE' assert table.data.dtype.names == ('GOODBYE',) assert table.columns.names == ['GOODBYE'] assert table.data.columns.names == ['GOODBYE'] table.columns['GOODBYE'].name = 'foo' with pytest.raises(KeyError): table.columns['GOODBYE'] assert table.columns['foo'].name == 'foo' assert table.data.dtype.names == ('foo',) assert table.columns.names == ['foo'] assert table.data.columns.names == ['foo'] def test_x_column_deepcopy(self): """ Regression test for https://github.com/astropy/astropy/pull/4514 Tests that columns with the X (bit array) format can be deep-copied. """ c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0]) c2 = copy.deepcopy(c) assert c2.name == c.name assert c2.format == c.format assert np.all(c2.array == c.array) def test_p_column_deepcopy(self): """ Regression test for https://github.com/astropy/astropy/pull/4514 Tests that columns with the P/Q formats (variable length arrays) can be deep-copied. 
""" c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]]) c2 = copy.deepcopy(c) assert c2.name == c.name assert c2.format == c.format assert np.all(c2.array[0] == c.array[0]) assert np.all(c2.array[1] == c.array[1]) c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]]) c4 = copy.deepcopy(c3) assert c4.name == c3.name assert c4.format == c3.format assert np.all(c4.array[0] == c3.array[0]) assert np.all(c4.array[1] == c3.array[1]) def test_column_verify_keywords(self): """ Test that the keyword arguments used to initialize a Column, specifically those that typically read from a FITS header (so excluding array), are verified to have a valid value. """ with pytest.raises(AssertionError) as err: c = fits.Column(1, format='I', array=[1, 2, 3, 4, 5]) assert 'Column name must be a string able to fit' in str(err.value) with pytest.raises(VerifyError) as err: c = fits.Column('col', format='I', null='Nan', disp=1, coord_type=1, coord_unit=2, coord_ref_point='1', coord_ref_value='1', coord_inc='1', time_ref_pos=1) err_msgs = ['keyword arguments to Column were invalid', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'] for msg in err_msgs: assert msg in str(err.value) def test_column_verify_start(self): """ Regression test for https://github.com/astropy/astropy/pull/6359 Test the validation of the column start position option (ASCII table only), corresponding to ``TBCOL`` keyword. Test whether the VerifyError message generated is the one with highest priority, i.e. the order of error messages to be displayed is maintained. """ with pytest.raises(VerifyError) as err: c = fits.Column('a', format='B', start='a', array=[1, 2, 3]) assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value) with pytest.raises(VerifyError) as err: c = fits.Column('a', format='I', start='a', array=[1, 2, 3]) assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value) with pytest.raises(VerifyError) as err: c = fits.Column('a', format='I', start='-56', array=[1, 2, 3]) assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value) def test_regression_5383(): # Regression test for an undefined variable x = np.array([1, 2, 3]) col = fits.Column(name='a', array=x, format='E') hdu = fits.BinTableHDU.from_columns([col]) del hdu._header['TTYPE1'] hdu.columns[0].name = 'b' def test_table_to_hdu(): from ....table import Table table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]], names=['a', 'b', 'c'], dtype=['i', 'U1', 'f']) table['a'].unit = 'm/s' table['b'].unit = 'not-a-unit' table.meta['foo'] = 'bar' with catch_warnings() as w: hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1})) assert len(w) == 1 assert str(w[0].message).startswith("'not-a-unit' did not parse as" " fits unit") for name in 'abc': assert np.array_equal(table[name], hdu.data[name]) # Check that TUNITn cards appear in the correct order # (https://github.com/astropy/astropy/pull/5720) assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2') assert hdu.header['FOO'] == 'bar' assert hdu.header['TEST'] == 1 def test_regression_scalar_indexing(): # Indexing a FITS_rec with a tuple that returns a scalar record # should work x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]).view(fits.FITS_rec) x1a = x[1] # this should succeed. x1b = x[(1,)] # FITS_record does not define __eq__; so test elements. 
assert all(a == b for a, b in zip(x1a, x1b)) def test_new_column_attributes_preserved(tmpdir): # Regression test for https://github.com/astropy/astropy/issues/7145 # This makes sure that for now we don't clear away keywords that have # newly been recognized (in Astropy 3.0) as special column attributes but # instead just warn that we might do so in future. The new keywords are: # TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS col = [] col.append(fits.Column(name="TIME", format="1E", unit="s")) col.append(fits.Column(name="RAWX", format="1I", unit="pixel")) col.append(fits.Column(name="RAWY", format="1I")) cd = fits.ColDefs(col) hdr = fits.Header() # Keywords that will get ignored in favor of these in the data hdr['TUNIT1'] = 'pixel' hdr['TUNIT2'] = 'm' hdr['TUNIT3'] = 'm' # Keywords that were added in Astropy 3.0 that should eventually be # ignored and set on the data instead hdr['TCTYP2'] = 'RA---TAN' hdr['TCTYP3'] = 'ANGLE' hdr['TCRVL2'] = -999.0 hdr['TCRVL3'] = -999.0 hdr['TCRPX2'] = 1.0 hdr['TCRPX3'] = 1.0 hdr['TALEN2'] = 16384 hdr['TALEN3'] = 1024 hdr['TCUNI2'] = 'angstrom' hdr['TCUNI3'] = 'deg' # Other non-relevant keywords hdr['RA'] = 1.5 hdr['DEC'] = 3.0 with pytest.warns(AstropyDeprecationWarning) as warning_list: hdu = fits.BinTableHDU.from_columns(cd, hdr) assert str(warning_list[0].message).startswith("The following keywords are now recognized as special") # First, check that special keywords such as TUNIT are ignored in the header # We may want to change that behavior in future, but this is the way it's # been for a while now. assert hdu.columns[0].unit == 's' assert hdu.columns[1].unit == 'pixel' assert hdu.columns[2].unit is None assert hdu.header['TUNIT1'] == 's' assert hdu.header['TUNIT2'] == 'pixel' assert 'TUNIT3' not in hdu.header # TUNIT3 was removed # Now, check that the new special keywords are actually still there # but weren't used to set the attributes on the data assert hdu.columns[0].coord_type is None assert hdu.columns[1].coord_type is None assert hdu.columns[2].coord_type is None assert 'TCTYP1' not in hdu.header assert hdu.header['TCTYP2'] == 'RA---TAN' assert hdu.header['TCTYP3'] == 'ANGLE' # Make sure that other keywords are still there assert hdu.header['RA'] == 1.5 assert hdu.header['DEC'] == 3.0 # Now we can write this HDU to a file and re-load. Re-loading *should* # cause the special column attribtues to be picked up (it's just that when a # header is manually specified, these values are ignored) filename = tmpdir.join('test.fits').strpath hdu.writeto(filename) # Make sure we don't emit a warning in this case with pytest.warns(None) as warning_list: hdu2 = fits.open(filename)[1] assert len(warning_list) == 0 # Check that column attributes are now correctly set assert hdu2.columns[0].unit == 's' assert hdu2.columns[1].unit == 'pixel' assert hdu2.columns[2].unit is None assert hdu2.header['TUNIT1'] == 's' assert hdu2.header['TUNIT2'] == 'pixel' assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed # Now, check that the new special keywords are actually still there # but weren't used to set the attributes on the data assert hdu2.columns[0].coord_type is None assert hdu2.columns[1].coord_type == 'RA---TAN' assert hdu2.columns[2].coord_type == 'ANGLE' assert 'TCTYP1' not in hdu2.header assert hdu2.header['TCTYP2'] == 'RA---TAN' assert hdu2.header['TCTYP3'] == 'ANGLE' # Make sure that other keywords are still there assert hdu2.header['RA'] == 1.5 assert hdu2.header['DEC'] == 3.0
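# Illustrative sketch, not part of the original tests: a minimal example of
# how per-column attributes given to fits.Column surface as TTYPEn/TUNITn
# keywords in a binary table header, the mapping the test above relies on.
# The column name and values are made up.
def _example_column_unit_keyword():
    import numpy as np
    from astropy.io import fits

    col = fits.Column(name='TIME', format='1E', unit='s',
                      array=np.array([1.0, 2.0, 3.0], dtype=np.float32))
    hdu = fits.BinTableHDU.from_columns([col])

    # The column name and unit are reflected in the generated header.
    assert hdu.header['TTYPE1'] == 'TIME'
    assert hdu.header['TUNIT1'] == 's'
    return hdu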
c5b156b1b274f136089770048bed561ef9b5a9e895969b4035f426fd0bb5c609
# Licensed under a 3-clause BSD style license - see PYFITS.rst import sys import warnings import pytest import numpy as np from .test_table import comparerecords from ..hdu.base import _ValidHDU from ....io import fits from . import FitsTestCase class TestChecksumFunctions(FitsTestCase): # All checksums have been verified against CFITSIO def setup(self): super().setup() self._oldfilters = warnings.filters[:] warnings.filterwarnings( 'error', message='Checksum verification failed') warnings.filterwarnings( 'error', message='Datasum verification failed') # Monkey-patch the _get_timestamp method so that the checksum # timestamps (and hence the checksum themselves) are always the same self._old_get_timestamp = _ValidHDU._get_timestamp _ValidHDU._get_timestamp = lambda self: '2013-12-20T13:36:10' def teardown(self): super().teardown() warnings.filters = self._oldfilters _ValidHDU._get_timestamp = self._old_get_timestamp def test_sample_file(self): hdul = fits.open(self.data('checksum.fits'), checksum=True) hdul.close() def test_image_create(self): n = np.arange(100, dtype=np.int64) hdu = fits.PrimaryHDU(n) hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert (hdu.data == hdul[0].data).all() assert 'CHECKSUM' in hdul[0].header assert 'DATASUM' in hdul[0].header if not sys.platform.startswith('win32'): # The checksum ends up being different on Windows, possibly due # to slight floating point differences assert hdul[0].header['CHECKSUM'] == 'ZHMkeGKjZGKjbGKj' assert hdul[0].header['DATASUM'] == '4950' def test_scaled_data(self): with fits.open(self.data('scale.fits')) as hdul: orig_data = hdul[0].data.copy() hdul[0].scale('int16', 'old') hdul.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul1: assert (hdul1[0].data == orig_data).all() assert 'CHECKSUM' in hdul1[0].header assert hdul1[0].header['CHECKSUM'] == 'cUmaeUjZcUjacUjW' assert 'DATASUM' in hdul1[0].header assert hdul1[0].header['DATASUM'] == '1891563534' def test_scaled_data_auto_rescale(self): """ Regression test for https://github.com/astropy/astropy/issues/3883#issuecomment-115122647 Ensure that when scaled data is automatically rescaled on opening/writing a file that the checksum and datasum are computed for the rescaled array. 
""" with fits.open(self.data('scale.fits')) as hdul: # Write out a copy of the data with the rescaling applied hdul.writeto(self.temp('rescaled.fits')) # Reopen the new file and save it back again with a checksum with fits.open(self.temp('rescaled.fits')) as hdul: hdul.writeto(self.temp('rescaled2.fits'), overwrite=True, checksum=True) # Now do like in the first writeto but use checksum immediately with fits.open(self.data('scale.fits')) as hdul: hdul.writeto(self.temp('rescaled3.fits'), checksum=True) # Also don't rescale the data but add a checksum with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul: hdul.writeto(self.temp('scaled.fits'), checksum=True) # Must used nested with statements to support older Python versions # (but contextlib.nested is not available in newer Pythons :( with fits.open(self.temp('rescaled2.fits')) as hdul1: with fits.open(self.temp('rescaled3.fits')) as hdul2: with fits.open(self.temp('scaled.fits')) as hdul3: hdr1 = hdul1[0].header hdr2 = hdul2[0].header hdr3 = hdul3[0].header assert hdr1['DATASUM'] == hdr2['DATASUM'] assert hdr1['CHECKSUM'] == hdr2['CHECKSUM'] assert hdr1['DATASUM'] != hdr3['DATASUM'] assert hdr1['CHECKSUM'] != hdr3['CHECKSUM'] def test_uint16_data(self): checksums = [ ('aDcXaCcXaCcXaCcX', '0'), ('oYiGqXi9oXiEoXi9', '1746888714'), ('VhqQWZoQVfoQVZoQ', '0'), ('4cPp5aOn4aOn4aOn', '0'), ('8aCN8X9N8aAN8W9N', '1756785133'), ('UhqdUZnbUfnbUZnb', '0'), ('4cQJ5aN94aNG4aN9', '0')] with fits.open(self.data('o4sp040b0_raw.fits'), uint=True) as hdul: hdul.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), uint=True, checksum=True) as hdul1: for idx, (hdu_a, hdu_b) in enumerate(zip(hdul, hdul1)): if hdu_a.data is None or hdu_b.data is None: assert hdu_a.data is hdu_b.data else: assert (hdu_a.data == hdu_b.data).all() assert 'CHECKSUM' in hdul[idx].header assert hdul[idx].header['CHECKSUM'] == checksums[idx][0] assert 'DATASUM' in hdul[idx].header assert hdul[idx].header['DATASUM'] == checksums[idx][1] def test_groups_hdu_data(self): imdata = np.arange(100.0) imdata.shape = (10, 1, 1, 2, 5) pdata1 = np.arange(10) + 0.1 pdata2 = 42 x = fits.hdu.groups.GroupData(imdata, parnames=[str('abc'), str('xyz')], pardata=[pdata1, pdata2], bitpix=-32) hdu = fits.GroupsHDU(x) hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert comparerecords(hdul[0].data, hdu.data) assert 'CHECKSUM' in hdul[0].header assert hdul[0].header['CHECKSUM'] == '3eDQAZDO4dDOAZDO' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '2797758084' def test_binary_table_data(self): a1 = np.array(['NGC1001', 'NGC1002', 'NGC1003']) a2 = np.array([11.1, 12.3, 15.2]) col1 = fits.Column(name='target', format='20A', array=a1) col2 = fits.Column(name='V_mag', format='E', array=a2) cols = fits.ColDefs([col1, col2]) tbhdu = fits.BinTableHDU.from_columns(cols) tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert comparerecords(tbhdu.data, hdul[1].data) assert 'CHECKSUM' in hdul[0].header assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' assert 'CHECKSUM' in hdul[1].header assert hdul[1].header['CHECKSUM'] == 'aD1Oa90MaC0Ma90M' assert 'DATASUM' in hdul[1].header assert hdul[1].header['DATASUM'] == '1062205743' def test_variable_length_table_data(self): c1 = 
fits.Column(name='var', format='PJ()', array=np.array([[45.0, 56], np.array([11, 12, 13])], 'O')) c2 = fits.Column(name='xyz', format='2I', array=[[11, 3], [12, 4]]) tbhdu = fits.BinTableHDU.from_columns([c1, c2]) tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert comparerecords(tbhdu.data, hdul[1].data) assert 'CHECKSUM' in hdul[0].header assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' assert 'CHECKSUM' in hdul[1].header assert hdul[1].header['CHECKSUM'] == 'YIGoaIEmZIEmaIEm' assert 'DATASUM' in hdul[1].header assert hdul[1].header['DATASUM'] == '1507485' def test_ascii_table_data(self): a1 = np.array(['abc', 'def']) r1 = np.array([11.0, 12.0]) c1 = fits.Column(name='abc', format='A3', array=a1) # This column used to be E format, but the single-precision float lost # too much precision when scaling so it was changed to a D c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3, bzero=0.6) c3 = fits.Column(name='t1', format='I', array=[91, 92, 93]) x = fits.ColDefs([c1, c2, c3]) hdu = fits.TableHDU.from_columns(x) hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert comparerecords(hdu.data, hdul[1].data) assert 'CHECKSUM' in hdul[0].header assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' if not sys.platform.startswith('win32'): # The checksum ends up being different on Windows, possibly due # to slight floating point differences assert 'CHECKSUM' in hdul[1].header assert hdul[1].header['CHECKSUM'] == '3rKFAoI94oICAoI9' assert 'DATASUM' in hdul[1].header assert hdul[1].header['DATASUM'] == '1914653725' def test_compressed_image_data(self): with fits.open(self.data('comp.fits')) as h1: h1.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as h2: assert np.all(h1[1].data == h2[1].data) assert 'CHECKSUM' in h2[0].header assert h2[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA' assert 'DATASUM' in h2[0].header assert h2[0].header['DATASUM'] == '0' assert 'CHECKSUM' in h2[1].header assert h2[1].header['CHECKSUM'] == 'ZeAbdb8aZbAabb7a' assert 'DATASUM' in h2[1].header assert h2[1].header['DATASUM'] == '113055149' def test_compressed_image_data_int16(self): n = np.arange(100, dtype='int16') hdu = fits.ImageHDU(n) comp_hdu = fits.CompImageHDU(hdu.data, hdu.header) comp_hdu.writeto(self.temp('tmp.fits'), checksum=True) hdu.writeto(self.temp('uncomp.fits'), checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert np.all(hdul[1].data == comp_hdu.data) assert np.all(hdul[1].data == hdu.data) assert 'CHECKSUM' in hdul[0].header assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' assert 'CHECKSUM' in hdul[1].header assert hdul[1]._header['CHECKSUM'] == 'J5cCJ5c9J5cAJ5c9' assert 'DATASUM' in hdul[1].header assert hdul[1]._header['DATASUM'] == '2453673070' assert 'CHECKSUM' in hdul[1].header with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2: header_comp = hdul[1]._header header_uncomp = hdul2[1].header assert 'ZHECKSUM' in header_comp assert 'CHECKSUM' in header_uncomp assert header_uncomp['CHECKSUM'] == 'ZE94eE91ZE91bE91' assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM'] assert 
'ZDATASUM' in header_comp assert 'DATASUM' in header_uncomp assert header_uncomp['DATASUM'] == '160565700' assert header_comp['ZDATASUM'] == header_uncomp['DATASUM'] def test_compressed_image_data_float32(self): n = np.arange(100, dtype='float32') hdu = fits.ImageHDU(n) comp_hdu = fits.CompImageHDU(hdu.data, hdu.header) comp_hdu.writeto(self.temp('tmp.fits'), checksum=True) hdu.writeto(self.temp('uncomp.fits'), checksum=True) with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: assert np.all(hdul[1].data == comp_hdu.data) assert np.all(hdul[1].data == hdu.data) assert 'CHECKSUM' in hdul[0].header assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' assert 'CHECKSUM' in hdul[1].header assert 'DATASUM' in hdul[1].header if not sys.platform.startswith('win32'): # The checksum ends up being different on Windows, possibly due # to slight floating point differences assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH' assert hdul[1]._header['DATASUM'] == '1277667818' with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2: header_comp = hdul[1]._header header_uncomp = hdul2[1].header assert 'ZHECKSUM' in header_comp assert 'CHECKSUM' in header_uncomp assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2' assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM'] assert 'ZDATASUM' in header_comp assert 'DATASUM' in header_uncomp assert header_uncomp['DATASUM'] == '2393636889' assert header_comp['ZDATASUM'] == header_uncomp['DATASUM'] def test_open_with_no_keywords(self): hdul = fits.open(self.data('arange.fits'), checksum=True) hdul.close() def test_append(self): hdul = fits.open(self.data('tb.fits')) hdul.writeto(self.temp('tmp.fits'), overwrite=True) n = np.arange(100) fits.append(self.temp('tmp.fits'), n, checksum=True) hdul.close() hdul = fits.open(self.temp('tmp.fits'), checksum=True) assert hdul[0]._checksum is None hdul.close() def test_writeto_convenience(self): n = np.arange(100) fits.writeto(self.temp('tmp.fits'), n, overwrite=True, checksum=True) hdul = fits.open(self.temp('tmp.fits'), checksum=True) self._check_checksums(hdul[0]) hdul.close() def test_hdu_writeto(self): n = np.arange(100, dtype='int16') hdu = fits.ImageHDU(n) hdu.writeto(self.temp('tmp.fits'), checksum=True) hdul = fits.open(self.temp('tmp.fits'), checksum=True) self._check_checksums(hdul[0]) hdul.close() def test_hdu_writeto_existing(self): """ Tests that when using writeto with checksum=True, a checksum and datasum are added to HDUs that did not previously have one. 
Regression test for https://github.com/spacetelescope/PyFITS/issues/8 """ with fits.open(self.data('tb.fits')) as hdul: hdul.writeto(self.temp('test.fits'), checksum=True) with fits.open(self.temp('test.fits')) as hdul: assert 'CHECKSUM' in hdul[0].header # These checksums were verified against CFITSIO assert hdul[0].header['CHECKSUM'] == '7UgqATfo7TfoATfo' assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' assert 'CHECKSUM' in hdul[1].header assert hdul[1].header['CHECKSUM'] == '99daD8bX98baA8bU' assert 'DATASUM' in hdul[1].header assert hdul[1].header['DATASUM'] == '1829680925' def test_datasum_only(self): n = np.arange(100, dtype='int16') hdu = fits.ImageHDU(n) hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum='datasum') with fits.open(self.temp('tmp.fits'), checksum=True) as hdul: if not (hasattr(hdul[0], '_datasum') and hdul[0]._datasum): pytest.fail(msg='Missing DATASUM keyword') if not (hasattr(hdul[0], '_checksum') and not hdul[0]._checksum): pytest.fail(msg='Non-empty CHECKSUM keyword') def test_open_update_mode_preserve_checksum(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 where checksums are being removed from headers when a file is opened in update mode, even though no changes were made to the file. """ self.copy_file('checksum.fits') with fits.open(self.temp('checksum.fits')) as hdul: data = hdul[1].data.copy() hdul = fits.open(self.temp('checksum.fits'), mode='update') hdul.close() with fits.open(self.temp('checksum.fits')) as hdul: assert 'CHECKSUM' in hdul[1].header assert 'DATASUM' in hdul[1].header assert comparerecords(data, hdul[1].data) def test_open_update_mode_update_checksum(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148, part 2. This ensures that if a file contains a checksum, the checksum is updated when changes are saved to the file, even if the file was opened with the default of checksum=False. An existing checksum and/or datasum are only stripped if the file is opened with checksum='remove'. """ self.copy_file('checksum.fits') with fits.open(self.temp('checksum.fits')) as hdul: header = hdul[1].header.copy() data = hdul[1].data.copy() with fits.open(self.temp('checksum.fits'), mode='update') as hdul: hdul[1].header['FOO'] = 'BAR' hdul[1].data[0]['TIME'] = 42 with fits.open(self.temp('checksum.fits')) as hdul: header2 = hdul[1].header data2 = hdul[1].data assert header2[:-3] == header[:-2] assert 'CHECKSUM' in header2 assert 'DATASUM' in header2 assert header2['FOO'] == 'BAR' assert (data2['TIME'][1:] == data['TIME'][1:]).all() assert data2['TIME'][0] == 42 with fits.open(self.temp('checksum.fits'), mode='update', checksum='remove') as hdul: pass with fits.open(self.temp('checksum.fits')) as hdul: header2 = hdul[1].header data2 = hdul[1].data assert header2[:-1] == header[:-2] assert 'CHECKSUM' not in header2 assert 'DATASUM' not in header2 assert header2['FOO'] == 'BAR' assert (data2['TIME'][1:] == data['TIME'][1:]).all() assert data2['TIME'][0] == 42 def test_overwrite_invalid(self): """ Tests that invalid checksum or datasum are overwriten when the file is saved. 
""" reffile = self.temp('ref.fits') with fits.open(self.data('tb.fits')) as hdul: hdul.writeto(reffile, checksum=True) testfile = self.temp('test.fits') with fits.open(self.data('tb.fits')) as hdul: hdul[0].header['DATASUM'] = '1 ' hdul[0].header['CHECKSUM'] = '8UgqATfo7TfoATfo' hdul[1].header['DATASUM'] = '2349680925' hdul[1].header['CHECKSUM'] = '11daD8bX98baA8bU' hdul.writeto(testfile) with fits.open(testfile) as hdul: hdul.writeto(self.temp('test2.fits'), checksum=True) with fits.open(self.temp('test2.fits')) as hdul: with fits.open(reffile) as ref: assert 'CHECKSUM' in hdul[0].header # These checksums were verified against CFITSIO assert hdul[0].header['CHECKSUM'] == ref[0].header['CHECKSUM'] assert 'DATASUM' in hdul[0].header assert hdul[0].header['DATASUM'] == '0' assert 'CHECKSUM' in hdul[1].header assert hdul[1].header['CHECKSUM'] == ref[1].header['CHECKSUM'] assert 'DATASUM' in hdul[1].header assert hdul[1].header['DATASUM'] == ref[1].header['DATASUM'] def _check_checksums(self, hdu): if not (hasattr(hdu, '_datasum') and hdu._datasum): pytest.fail(msg='Missing DATASUM keyword') if not (hasattr(hdu, '_checksum') and hdu._checksum): pytest.fail(msg='Missing CHECKSUM keyword')
2654b4274e02d52936c0b086c7b9a65b4669524812ead5b3f37ea1ce717167cf
# Licensed under a 3-clause BSD style license - see PYFITS.rst import numpy as np from ....io import fits from . import FitsTestCase from ....tests.helper import catch_warnings class TestDivisionFunctions(FitsTestCase): """Test code units that rely on correct integer division.""" def test_rec_from_string(self): t1 = fits.open(self.data('tb.fits')) s = t1[1].data.tostring() a1 = np.rec.array( s, dtype=np.dtype([('c1', '>i4'), ('c2', '|S3'), ('c3', '>f4'), ('c4', '|i1')]), shape=len(s) // 12) def test_card_with_continue(self): h = fits.PrimaryHDU() with catch_warnings() as w: h.header['abc'] = 'abcdefg' * 20 assert len(w) == 0 def test_valid_hdu_size(self): t1 = fits.open(self.data('tb.fits')) assert type(t1[1].size) is type(1) # nopep8 def test_hdu_get_size(self): with catch_warnings() as w: t1 = fits.open(self.data('tb.fits')) assert len(w) == 0 def test_section(self, capsys): # section testing fs = fits.open(self.data('arange.fits')) with catch_warnings() as w: assert np.all(fs[0].section[3, 2, 5] == np.array([357])) assert len(w) == 0
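# Illustrative sketch, not part of the original tests: the 12-byte row width
# that test_rec_from_string divides by follows from the column dtypes, so the
# row count must use integer division.  The buffer length here is made up.
def _example_row_width():
    import numpy as np

    row_dtype = np.dtype([('c1', '>i4'), ('c2', '|S3'),
                          ('c3', '>f4'), ('c4', '|i1')])
    assert row_dtype.itemsize == 12  # 4 + 3 + 4 + 1 bytes per row

    nbytes = 120                     # hypothetical raw buffer length
    nrows = nbytes // row_dtype.itemsize
    assert nrows == 10
    return nrows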
1aaa163cba9568099ba12e7400456c618d6621f83f7d73ee5a0285cc09d30b6f
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import time import pytest import numpy as np from . import FitsTestCase from .test_table import comparerecords from ....io import fits class TestGroupsFunctions(FitsTestCase): def test_open(self): with fits.open(self.data('random_groups.fits')) as hdul: assert isinstance(hdul[0], fits.GroupsHDU) naxes = (3, 1, 128, 1, 1) parameters = ['UU', 'VV', 'WW', 'BASELINE', 'DATE'] info = [(0, 'PRIMARY', 1, 'GroupsHDU', 147, naxes, 'float32', '3 Groups 5 Parameters')] assert hdul.info(output=False) == info ghdu = hdul[0] assert ghdu.parnames == parameters assert list(ghdu.data.dtype.names) == parameters + ['DATA'] assert isinstance(ghdu.data, fits.GroupData) # The data should be equal to the number of groups assert ghdu.header['GCOUNT'] == len(ghdu.data) assert ghdu.data.data.shape == (len(ghdu.data),) + naxes[::-1] assert ghdu.data.parnames == parameters assert isinstance(ghdu.data[0], fits.Group) assert len(ghdu.data[0]) == len(parameters) + 1 assert ghdu.data[0].data.shape == naxes[::-1] assert ghdu.data[0].parnames == parameters def test_open_groups_in_update_mode(self): """ Test that opening a file containing a groups HDU in update mode and then immediately closing it does not result in any unnecessary file modifications. Similar to test_image.TestImageFunctions.test_open_scaled_in_update_mode(). """ # Copy the original file before making any possible changes to it self.copy_file('random_groups.fits') mtime = os.stat(self.temp('random_groups.fits')).st_mtime time.sleep(1) fits.open(self.temp('random_groups.fits'), mode='update', memmap=False).close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. assert mtime == os.stat(self.temp('random_groups.fits')).st_mtime def test_random_groups_data_update(self): """ Regression test for https://github.com/astropy/astropy/issues/3730 and for https://github.com/spacetelescope/PyFITS/issues/102 """ self.copy_file('random_groups.fits') with fits.open(self.temp('random_groups.fits'), mode='update') as h: h[0].data['UU'] = 0.42 with fits.open(self.temp('random_groups.fits'), mode='update') as h: assert np.all(h[0].data['UU'] == 0.42) def test_parnames_round_trip(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/130 Ensures that opening a random groups file in update mode or writing it to a new file does not cause any change to the parameter names. """ # Because this test tries to update the random_groups.fits file, let's # make a copy of it first (so that the file doesn't actually get # modified in the off chance that the test fails self.copy_file('random_groups.fits') parameters = ['UU', 'VV', 'WW', 'BASELINE', 'DATE'] with fits.open(self.temp('random_groups.fits'), mode='update') as h: assert h[0].parnames == parameters h.flush() # Open again just in read-only mode to ensure the parnames didn't # change with fits.open(self.temp('random_groups.fits')) as h: assert h[0].parnames == parameters h.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert h[0].parnames == parameters def test_groupdata_slice(self): """ A simple test to ensure that slicing GroupData returns a new, smaller GroupData object, as is the case with a normal FITS_rec. This is a regression test for an as-of-yet unreported issue where slicing GroupData returned a single Group record. 
""" with fits.open(self.data('random_groups.fits')) as hdul: s = hdul[0].data[1:] assert isinstance(s, fits.GroupData) assert len(s) == 2 assert hdul[0].data.parnames == s.parnames def test_group_slice(self): """ Tests basic slicing a single group record. """ # A very basic slice test with fits.open(self.data('random_groups.fits')) as hdul: g = hdul[0].data[0] s = g[2:4] assert len(s) == 2 assert s[0] == g[2] assert s[-1] == g[-3] s = g[::-1] assert len(s) == 6 assert (s[0] == g[-1]).all() assert s[-1] == g[0] s = g[::2] assert len(s) == 3 assert s[0] == g[0] assert s[1] == g[2] assert s[2] == g[4] def test_create_groupdata(self): """ Basic test for creating GroupData from scratch. """ imdata = np.arange(100.0) imdata.shape = (10, 1, 1, 2, 5) pdata1 = np.arange(10, dtype=np.float32) + 0.1 pdata2 = 42.0 x = fits.hdu.groups.GroupData(imdata, parnames=['abc', 'xyz'], pardata=[pdata1, pdata2], bitpix=-32) assert x.parnames == ['abc', 'xyz'] assert (x.par('abc') == pdata1).all() assert (x.par('xyz') == ([pdata2] * len(x))).all() assert (x.data == imdata).all() # Test putting the data into a GroupsHDU and round-tripping it ghdu = fits.GroupsHDU(data=x) ghdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: hdr = h[0].header assert hdr['GCOUNT'] == 10 assert hdr['PCOUNT'] == 2 assert hdr['NAXIS'] == 5 assert hdr['NAXIS1'] == 0 assert hdr['NAXIS2'] == 5 assert hdr['NAXIS3'] == 2 assert hdr['NAXIS4'] == 1 assert hdr['NAXIS5'] == 1 assert h[0].data.parnames == ['abc', 'xyz'] assert comparerecords(h[0].data, x) def test_duplicate_parameter(self): """ Tests support for multiple parameters of the same name, and ensures that the data in duplicate parameters are returned as a single summed value. """ imdata = np.arange(100.0) imdata.shape = (10, 1, 1, 2, 5) pdata1 = np.arange(10, dtype=np.float32) + 1 pdata2 = 42.0 x = fits.hdu.groups.GroupData(imdata, parnames=['abc', 'xyz', 'abc'], pardata=[pdata1, pdata2, pdata1], bitpix=-32) assert x.parnames == ['abc', 'xyz', 'abc'] assert (x.par('abc') == pdata1 * 2).all() assert x[0].par('abc') == 2 # Test setting a parameter x[0].setpar(0, 2) assert x[0].par('abc') == 3 pytest.raises(ValueError, x[0].setpar, 'abc', 2) x[0].setpar('abc', (2, 3)) assert x[0].par('abc') == 5 assert x.par('abc')[0] == 5 assert (x.par('abc')[1:] == pdata1[1:] * 2).all() # Test round-trip ghdu = fits.GroupsHDU(data=x) ghdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: hdr = h[0].header assert hdr['PCOUNT'] == 3 assert hdr['PTYPE1'] == 'abc' assert hdr['PTYPE2'] == 'xyz' assert hdr['PTYPE3'] == 'abc' assert x.parnames == ['abc', 'xyz', 'abc'] assert x.dtype.names == ('abc', 'xyz', '_abc', 'DATA') assert x.par('abc')[0] == 5 assert (x.par('abc')[1:] == pdata1[1:] * 2).all()
f4e82591e73c034b2e816a4fb71c2478a58fff5cbcc875a7dd516c22576cd22e
# Licensed under a 3-clause BSD style license - see PYFITS.rst import numpy as np from ....io import fits from . import FitsTestCase class TestNonstandardHdus(FitsTestCase): def test_create_fitshdu(self): """ A round trip test of creating a FitsHDU, adding a FITS file to it, writing the FitsHDU out as part of a new FITS file, and then reading it and recovering the original FITS file. """ self._test_create_fitshdu(compression=False) def test_create_fitshdu_with_compression(self): """Same as test_create_fitshdu but with gzip compression enabled.""" self._test_create_fitshdu(compression=True) def test_create_fitshdu_from_filename(self): """Regression test on `FitsHDU.fromfile`""" # Build up a simple test FITS file a = np.arange(100) phdu = fits.PrimaryHDU(data=a) phdu.header['TEST1'] = 'A' phdu.header['TEST2'] = 'B' imghdu = fits.ImageHDU(data=a + 1) phdu.header['TEST3'] = 'C' phdu.header['TEST4'] = 'D' hdul = fits.HDUList([phdu, imghdu]) hdul.writeto(self.temp('test.fits')) fitshdu = fits.FitsHDU.fromfile(self.temp('test.fits')) hdul2 = fitshdu.hdulist assert len(hdul2) == 2 assert fits.FITSDiff(hdul, hdul2).identical def _test_create_fitshdu(self, compression=False): hdul_orig = fits.open(self.data('test0.fits'), do_not_scale_image_data=True) fitshdu = fits.FitsHDU.fromhdulist(hdul_orig, compress=compression) # Just to be meta, let's append to the same hdulist that the fitshdu # encapuslates hdul_orig.append(fitshdu) hdul_orig.writeto(self.temp('tmp.fits'), overwrite=True) del hdul_orig[-1] hdul = fits.open(self.temp('tmp.fits')) assert isinstance(hdul[-1], fits.FitsHDU) wrapped = hdul[-1].hdulist assert isinstance(wrapped, fits.HDUList) assert hdul_orig.info(output=False) == wrapped.info(output=False) assert (hdul[1].data == wrapped[1].data).all() assert (hdul[2].data == wrapped[2].data).all() assert (hdul[3].data == wrapped[3].data).all() assert (hdul[4].data == wrapped[4].data).all()
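# Illustrative sketch, not part of the original tests: embedding one FITS file
# inside another as a FitsHDU and recovering it, as exercised above.  Both
# file names are made up.
def _example_fitshdu_roundtrip(inner='inner.fits', outer='outer.fits'):
    import numpy as np
    from astropy.io import fits

    # Write a small FITS file, wrap it in a FitsHDU, and store that HDU in a
    # second file.
    fits.writeto(inner, np.arange(10), overwrite=True)
    fitshdu = fits.FitsHDU.fromfile(inner)
    fits.HDUList([fits.PrimaryHDU(), fitshdu]).writeto(outer, overwrite=True)

    # Reading the outer file back, the .hdulist attribute recovers the
    # embedded HDUList.
    with fits.open(outer) as hdul:
        assert isinstance(hdul[-1], fits.FitsHDU)
        assert isinstance(hdul[-1].hdulist, fits.HDUList)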
14ba701534932c1624c3d589625a35bb7980cba886caac5d356ce27cfb873e95
# Licensed under a 3-clause BSD style license - see PYFITS.rst import glob import io import os import platform import sys import pytest import numpy as np from ..verify import VerifyError from ....io import fits from ....tests.helper import raises, catch_warnings, ignore_warnings from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning from ....utils.compat import NUMPY_LT_1_12 from . import FitsTestCase class TestHDUListFunctions(FitsTestCase): def test_update_name(self): hdul = fits.open(self.data('o4sp040b0_raw.fits')) hdul[4].name = 'Jim' hdul[4].ver = 9 assert hdul[('JIM', 9)].header['extname'] == 'JIM' def test_hdu_file_bytes(self): hdul = fits.open(self.data('checksum.fits')) res = hdul[0].filebytes() assert res == 11520 res = hdul[1].filebytes() assert res == 8640 def test_hdulist_file_info(self): hdul = fits.open(self.data('checksum.fits')) res = hdul.fileinfo(0) def test_fileinfo(**kwargs): assert res['datSpan'] == kwargs.get('datSpan', 2880) assert res['resized'] == kwargs.get('resized', False) assert res['filename'] == self.data('checksum.fits') assert res['datLoc'] == kwargs.get('datLoc', 8640) assert res['hdrLoc'] == kwargs.get('hdrLoc', 0) assert res['filemode'] == 'readonly' res = hdul.fileinfo(1) test_fileinfo(datLoc=17280, hdrLoc=11520) hdu = fits.ImageHDU(data=hdul[0].data) hdul.insert(1, hdu) res = hdul.fileinfo(0) test_fileinfo(resized=True) res = hdul.fileinfo(1) test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None) res = hdul.fileinfo(2) test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520) def test_create_from_multiple_primary(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145 Ensure that a validation error occurs when saving an HDUList containing multiple PrimaryHDUs. """ hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()]) pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'), output_verify='exception') def test_append_primary_to_empty_list(self): # Tests appending a Simple PrimaryHDU to an empty HDUList. 
hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_extension_to_empty_list(self): """Tests appending a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_table_extension_to_empty_list(self): """Tests appending a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() hdul1 = fits.open(self.data('tb.fits')) hdul.append(hdul1[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_groupshdu_to_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '', '1 Groups 0 Parameters')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_primary_to_non_empty_list(self): """Tests appending a Simple PrimaryHDU to a non-empty HDUList.""" hdul = fits.open(self.data('arange.fits')) hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''), (1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info def test_append_extension_to_non_empty_list(self): """Tests appending a Simple ExtensionHDU to a non-empty HDUList.""" hdul = fits.open(self.data('tb.fits')) hdul.append(hdul[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''), (2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-append.fits')) assert fits.info(self.temp('test-append.fits'), output=False) == info @raises(ValueError) def test_append_groupshdu_to_non_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) hdu = fits.GroupsHDU() hdul.append(hdu) def test_insert_primary_to_empty_list(self): """Tests inserting a Simple PrimaryHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_extension_to_empty_list(self): """Tests inserting a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 
'PrimaryHDU', 4, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_table_extension_to_empty_list(self): """Tests inserting a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() hdul1 = fits.open(self.data('tb.fits')) hdul.insert(0, hdul1[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_groupshdu_to_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '', '1 Groups 0 Parameters')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_primary_to_non_empty_list(self): """Tests inserting a Simple PrimaryHDU to a non-empty HDUList.""" hdul = fits.open(self.data('arange.fits')) hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(1, hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''), (1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_extension_to_non_empty_list(self): """Tests inserting a Simple ExtensionHDU to a non-empty HDUList.""" hdul = fits.open(self.data('tb.fits')) hdul.insert(1, hdul[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''), (2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_groupshdu_to_non_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) hdu = fits.GroupsHDU() with pytest.raises(ValueError): hdul.insert(1, hdu) info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '', '1 Groups 0 Parameters'), (1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')] hdul.insert(0, hdu) assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info @raises(ValueError) def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self): """ Tests inserting a Simple GroupsHDU to the beginning of an HDUList that that already contains a GroupsHDU. """ hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) hdul.insert(0, hdu) def test_insert_extension_to_primary_in_non_empty_list(self): # Tests inserting a Simple ExtensionHDU to a non-empty HDUList. 
hdul = fits.open(self.data('tb.fits')) hdul.insert(0, hdul[1]) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''), (1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''), (2, '', 1, 'ImageHDU', 12, (), '', ''), (3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_insert_image_extension_to_primary_in_non_empty_list(self): """ Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList as the primary HDU. """ hdul = fits.open(self.data('tb.fits')) hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''), (1, '', 1, 'ImageHDU', 12, (), '', ''), (2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')] assert hdul.info(output=False) == info hdul.writeto(self.temp('test-insert.fits')) assert fits.info(self.temp('test-insert.fits'), output=False) == info def test_filename(self): """Tests the HDUList filename method.""" hdul = fits.open(self.data('tb.fits')) name = hdul.filename() assert name == self.data('tb.fits') def test_file_like(self): """ Tests the use of a file like object with no tell or seek methods in HDUList.writeto(), HDULIST.flush() or astropy.io.fits.writeto() """ hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul = fits.HDUList() hdul.append(hdu) tmpfile = open(self.temp('tmpfile.fits'), 'wb') hdul.writeto(tmpfile) tmpfile.close() info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert fits.info(self.temp('tmpfile.fits'), output=False) == info def test_file_like_2(self): hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) tmpfile = open(self.temp('tmpfile.fits'), 'wb') hdul = fits.open(tmpfile, mode='ostream') hdul.append(hdu) hdul.flush() tmpfile.close() hdul.close() info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert fits.info(self.temp('tmpfile.fits'), output=False) == info def test_file_like_3(self): tmpfile = open(self.temp('tmpfile.fits'), 'wb') fits.writeto(tmpfile, np.arange(100, dtype=np.int32)) tmpfile.close() info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')] assert fits.info(self.temp('tmpfile.fits'), output=False) == info def test_new_hdu_extname(self): """ Tests that new extension HDUs that are added to an HDUList can be properly indexed by their EXTNAME/EXTVER (regression test for ticket:48). """ f = fits.open(self.data('test0.fits')) hdul = fits.HDUList() hdul.append(f[0].copy()) hdul.append(fits.ImageHDU(header=f[1].header)) assert hdul[1].header['EXTNAME'] == 'SCI' assert hdul[1].header['EXTVER'] == 1 assert hdul.index_of(('SCI', 1)) == 1 def test_update_filelike(self): """Test opening a file-like object in update mode and resizing the HDU. 
""" sf = io.BytesIO() arr = np.zeros((100, 100)) hdu = fits.PrimaryHDU(data=arr) hdu.writeto(sf) sf.seek(0) arr = np.zeros((200, 200)) hdul = fits.open(sf, mode='update') hdul[0].data = arr hdul.flush() sf.seek(0) hdul = fits.open(sf) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_flush_readonly(self): """Test flushing changes to a file opened in a read only mode.""" oldmtime = os.stat(self.data('test0.fits')).st_mtime hdul = fits.open(self.data('test0.fits')) hdul[0].header['FOO'] = 'BAR' with catch_warnings(AstropyUserWarning) as w: hdul.flush() assert len(w) == 1 assert 'mode is not supported' in str(w[0].message) assert oldmtime == os.stat(self.data('test0.fits')).st_mtime def test_fix_extend_keyword(self): hdul = fits.HDUList() hdul.append(fits.PrimaryHDU()) hdul.append(fits.ImageHDU()) del hdul[0].header['EXTEND'] hdul.verify('silentfix') assert 'EXTEND' in hdul[0].header assert hdul[0].header['EXTEND'] is True def test_fix_malformed_naxisj(self): """ Tests that malformed NAXISj values are fixed sensibly. """ hdu = fits.open(self.data('arange.fits')) # Malform NAXISj header data hdu[0].header['NAXIS1'] = 11.0 hdu[0].header['NAXIS2'] = '10.0' hdu[0].header['NAXIS3'] = '7' # Axes cache needs to be malformed as well hdu[0]._axes = [11.0, '10.0', '7'] # Perform verification including the fix hdu.verify('silentfix') # Check that malformed data was converted assert hdu[0].header['NAXIS1'] == 11 assert hdu[0].header['NAXIS2'] == 10 assert hdu[0].header['NAXIS3'] == 7 def test_fix_wellformed_naxisj(self): """ Tests that wellformed NAXISj values are not modified. """ hdu = fits.open(self.data('arange.fits')) # Fake new NAXISj header data hdu[0].header['NAXIS1'] = 768 hdu[0].header['NAXIS2'] = 64 hdu[0].header['NAXIS3'] = 8 # Axes cache needs to be faked as well hdu[0]._axes = [768, 64, 8] # Perform verification including the fix hdu.verify('silentfix') # Check that malformed data was converted assert hdu[0].header['NAXIS1'] == 768 assert hdu[0].header['NAXIS2'] == 64 assert hdu[0].header['NAXIS3'] == 8 def test_new_hdulist_extend_keyword(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114 Tests that adding a PrimaryHDU to a new HDUList object updates the EXTEND keyword on that HDU. """ h0 = fits.Header() hdu = fits.PrimaryHDU(header=h0) sci = fits.ImageHDU(data=np.array(10)) image = fits.HDUList([hdu, sci]) image.writeto(self.temp('temp.fits')) assert 'EXTEND' in hdu.header assert hdu.header['EXTEND'] is True def test_replace_memmaped_array(self): # Copy the original before we modify it hdul = fits.open(self.data('test0.fits')) hdul.writeto(self.temp('temp.fits')) hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True) old_data = hdul[1].data.copy() hdul[1].data = hdul[1].data + 1 hdul.close() hdul = fits.open(self.temp('temp.fits'), memmap=True) assert ((old_data + 1) == hdul[1].data).all() def test_open_file_with_end_padding(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106 Open files with end padding bytes. 
""" hdul = fits.open(self.data('test0.fits'), do_not_scale_image_data=True) info = hdul.info(output=False) hdul.writeto(self.temp('temp.fits')) with open(self.temp('temp.fits'), 'ab') as f: f.seek(0, os.SEEK_END) f.write(b'\0' * 2880) with ignore_warnings(): assert info == fits.info(self.temp('temp.fits'), output=False, do_not_scale_image_data=True) def test_open_file_with_bad_header_padding(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136 Open files with nulls for header block padding instead of spaces. """ a = np.arange(100).reshape(10, 10) hdu = fits.PrimaryHDU(data=a) hdu.writeto(self.temp('temp.fits')) # Figure out where the header padding begins and fill it with nulls end_card_pos = str(hdu.header).index('END' + ' ' * 77) padding_start = end_card_pos + 80 padding_len = 2880 - padding_start with open(self.temp('temp.fits'), 'r+b') as f: f.seek(padding_start) f.write('\0'.encode('ascii') * padding_len) with catch_warnings(AstropyUserWarning) as w: with fits.open(self.temp('temp.fits')) as hdul: assert (hdul[0].data == a).all() assert ('contains null bytes instead of spaces' in str(w[0].message)) assert len(w) == 1 assert len(hdul) == 1 assert str(hdul[0].header) == str(hdu.header) def test_update_with_truncated_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 Test that saving an update where the header is shorter than the original header doesn't leave a stump from the old header in the file. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(hdu.header) < 34: hdu.header['TEST{}'.format(idx)] = idx idx += 1 hdu.writeto(self.temp('temp.fits'), checksum=True) with fits.open(self.temp('temp.fits'), mode='update') as hdul: # Modify the header, forcing it to be rewritten hdul[0].header['TEST1'] = 2 with fits.open(self.temp('temp.fits')) as hdul: assert (hdul[0].data == data).all() @pytest.mark.xfail(platform.system() == 'Windows' and not NUMPY_LT_1_12, reason='https://github.com/astropy/astropy/issues/5797') def test_update_resized_header(self): """ Test saving updates to a file where the header is one block smaller than before, and in the case where the heade ris one block larger than before. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(str(hdu.header)) <= 2880: hdu.header['TEST{}'.format(idx)] = idx idx += 1 orig_header = hdu.header.copy() hdu.writeto(self.temp('temp.fits')) with fits.open(self.temp('temp.fits'), mode='update') as hdul: while len(str(hdul[0].header)) > 2880: del hdul[0].header[-1] with fits.open(self.temp('temp.fits')) as hdul: assert hdul[0].header == orig_header[:-1] assert (hdul[0].data == data).all() with fits.open(self.temp('temp.fits'), mode='update') as hdul: idx = 101 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header['TEST{}'.format(idx)] = idx idx += 1 # Touch something in the data too so that it has to be rewritten hdul[0].data[0] = 27 with fits.open(self.temp('temp.fits')) as hdul: assert hdul[0].header[:-37] == orig_header[:-1] assert hdul[0].data[0] == 27 assert (hdul[0].data[1:] == data[1:]).all() def test_update_resized_header2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150 This is similar to test_update_resized_header, but specifically tests a case of multiple consecutive flush() calls on the same HDUList object, where each flush() requires a resize. 
""" data1 = np.arange(100) data2 = np.arange(100) + 100 phdu = fits.PrimaryHDU(data=data1) hdu = fits.ImageHDU(data=data2) phdu.writeto(self.temp('temp.fits')) with fits.open(self.temp('temp.fits'), mode='append') as hdul: hdul.append(hdu) with fits.open(self.temp('temp.fits'), mode='update') as hdul: idx = 1 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header['TEST{}'.format(idx)] = idx idx += 1 hdul.flush() hdul.append(hdu) with fits.open(self.temp('temp.fits')) as hdul: assert (hdul[0].data == data1).all() assert hdul[1].header == hdu.header assert (hdul[1].data == data2).all() assert (hdul[2].data == data2).all() @ignore_warnings() def test_hdul_fromstring(self): """ Test creating the HDUList structure in memory from a string containing an entire FITS file. This is similar to test_hdu_fromstring but for an entire multi-extension FITS file at once. """ # Tests HDUList.fromstring for all of Astropy's built in test files def test_fromstring(filename): with fits.open(filename) as hdul: orig_info = hdul.info(output=False) with open(filename, 'rb') as f: dat = f.read() hdul2 = fits.HDUList.fromstring(dat) assert orig_info == hdul2.info(output=False) for idx in range(len(hdul)): assert hdul[idx].header == hdul2[idx].header if hdul[idx].data is None or hdul2[idx].data is None: assert hdul[idx].data == hdul2[idx].data elif (hdul[idx].data.dtype.fields and hdul2[idx].data.dtype.fields): # Compare tables for n in hdul[idx].data.names: c1 = hdul[idx].data[n] c2 = hdul2[idx].data[n] assert (c1 == c2).all() elif (any(dim == 0 for dim in hdul[idx].data.shape) or any(dim == 0 for dim in hdul2[idx].data.shape)): # For some reason some combinations of Python and Numpy # on Windows result in MemoryErrors when trying to work # on memmap arrays with more than one dimension but # some dimensions of size zero, so include a special # case for that return hdul[idx].data.shape == hdul2[idx].data.shape else: np.testing.assert_array_equal(hdul[idx].data, hdul2[idx].data) for filename in glob.glob(os.path.join(self.data_dir, '*.fits')): if sys.platform == 'win32' and filename == 'zerowidth.fits': # Running this test on this file causes a crash in some # versions of Numpy on Windows. See ticket: # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174 continue elif filename.endswith('variable_length_table.fits'): # Comparing variable length arrays is non-trivial and thus # skipped at this point. # TODO: That's probably possible, so one could make it work. continue test_fromstring(filename) # Test that creating an HDUList from something silly raises a TypeError pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c']) def test_save_backup(self): """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121 Save backup of file before flushing changes. 
""" self.copy_file('scale.fits') with ignore_warnings(): with fits.open(self.temp('scale.fits'), mode='update', save_backup=True) as hdul: # Make some changes to the original file to force its header # and data to be rewritten hdul[0].header['TEST'] = 'TEST' hdul[0].data[0] = 0 assert os.path.exists(self.temp('scale.fits.bak')) with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul1: with fits.open(self.temp('scale.fits.bak'), do_not_scale_image_data=True) as hdul2: assert hdul1[0].header == hdul2[0].header assert (hdul1[0].data == hdul2[0].data).all() with ignore_warnings(): with fits.open(self.temp('scale.fits'), mode='update', save_backup=True) as hdul: # One more time to see if multiple backups are made hdul[0].header['TEST2'] = 'TEST' hdul[0].data[0] = 1 assert os.path.exists(self.temp('scale.fits.bak')) assert os.path.exists(self.temp('scale.fits.bak.1')) def test_replace_mmap_data(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. """ arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): hdu_a = fits.PrimaryHDU(data=arr_a) hdu_a.writeto(self.temp('test_a.fits'), overwrite=True) hdu_b = fits.PrimaryHDU(data=arr_b) hdu_b.writeto(self.temp('test_b.fits'), overwrite=True) hdul_a = fits.open(self.temp('test_a.fits'), mode='update', memmap=mmap_a) hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b) hdul_a[0].data = hdul_b[0].data hdul_a.close() hdul_b.close() hdul_a = fits.open(self.temp('test_a.fits')) assert np.all(hdul_a[0].data == arr_b) with ignore_warnings(): test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_replace_mmap_data_2(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. Like test_replace_mmap_data but with table data instead of image data. """ arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): col_a = fits.Column(name='a', format='J', array=arr_a) col_b = fits.Column(name='b', format='J', array=arr_b) hdu_a = fits.BinTableHDU.from_columns([col_a]) hdu_a.writeto(self.temp('test_a.fits'), overwrite=True) hdu_b = fits.BinTableHDU.from_columns([col_b]) hdu_b.writeto(self.temp('test_b.fits'), overwrite=True) hdul_a = fits.open(self.temp('test_a.fits'), mode='update', memmap=mmap_a) hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b) hdul_a[1].data = hdul_b[1].data hdul_a.close() hdul_b.close() hdul_a = fits.open(self.temp('test_a.fits')) assert 'b' in hdul_a[1].columns.names assert 'a' not in hdul_a[1].columns.names assert np.all(hdul_a[1].data['b'] == arr_b) with ignore_warnings(): test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_extname_in_hdulist(self): """ Tests to make sure that the 'in' operator works. 
Regression test for https://github.com/astropy/astropy/issues/3060 """ hdulist = fits.HDUList() hdulist.append(fits.ImageHDU(name='a')) assert 'a' in hdulist assert 'A' in hdulist assert ('a', 1) in hdulist assert ('A', 1) in hdulist assert 'b' not in hdulist assert ('a', 2) not in hdulist assert ('b', 1) not in hdulist assert ('b', 2) not in hdulist def test_overwrite_vs_clobber(self): hdulist = fits.HDUList([fits.PrimaryHDU()]) hdulist.writeto(self.temp('test_overwrite.fits')) hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True) with catch_warnings(AstropyDeprecationWarning) as warning_lines: hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"clobber" was ' 'deprecated in version 2.0 and will be removed in a ' 'future version. Use argument "overwrite" instead.') def test_invalid_hdu_key_in_contains(self): """ Make sure invalid keys in the 'in' operator return False. Regression test for https://github.com/astropy/astropy/issues/5583 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU()) hdulist.append(fits.ImageHDU()) # A more or less random assortment of things which are not valid keys. bad_keys = [None, 3.5, {}] for key in bad_keys: assert not (key in hdulist) def test_iteration_of_lazy_loaded_hdulist(self): """ Regression test for https://github.com/astropy/astropy/issues/5585 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU(name='SCI')) hdulist.append(fits.ImageHDU(name='SCI')) hdulist.append(fits.ImageHDU(name='nada')) hdulist.append(fits.ImageHDU(name='SCI')) filename = self.temp('many_extension.fits') hdulist.writeto(filename) f = fits.open(filename) # Check that all extensions are read if f is not sliced all_exts = [ext for ext in f] assert len(all_exts) == 5 # Reload the file to ensure we are still lazy loading f.close() f = fits.open(filename) # Try a simple slice with no conditional on the ext. This is essentially # the reported failure. all_exts_but_zero = [ext for ext in f[1:]] assert len(all_exts_but_zero) == 4 # Reload the file to ensure we are still lazy loading f.close() f = fits.open(filename) # Check whether behavior is proper if the upper end of the slice is not # omitted. read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI'] assert len(read_exts) == 2 def test_proper_error_raised_on_non_fits_file_with_unicode(self): """ Regression test for https://github.com/astropy/astropy/issues/5594 The failure shows up when (in python 3+) you try to open a file with unicode content that is not actually a FITS file. See: https://github.com/astropy/astropy/issues/5594#issuecomment-266583218 """ import codecs filename = self.temp('not-fits-with-unicode.fits') with codecs.open(filename, mode='w', encoding='utf=8') as f: f.write(u'Ce\xe7i ne marche pas') # This should raise an OSError because there is no end card. with pytest.raises(OSError): fits.open(filename) def test_no_resource_warning_raised_on_non_fits_file(self): """ Regression test for https://github.com/astropy/astropy/issues/6168 The ResourceWarning shows up when (in python 3+) you try to open a non-FITS file when using a filename. """ # To avoid creating the file multiple times the tests are # all included in one test file. 
See the discussion to the # PR at https://github.com/astropy/astropy/issues/6168 # filename = self.temp('not-fits.fits') with open(filename, mode='w') as f: f.write('# header line\n') f.write('0.1 0.2\n') # Opening the file should raise an OSError however the file # is opened (there are two distinct code paths, depending on # whether ignore_missing_end is True or False). # # Explicit tests are added to make sure the file handle is not # closed when passed in to fits.open. In this case the ResourceWarning # was not raised, but a check is still included. # with catch_warnings(ResourceWarning) as ws: # Make sure that files opened by the user are not closed with open(filename, mode='rb') as f: with pytest.raises(OSError): fits.open(f, ignore_missing_end=False) assert not f.closed with open(filename, mode='rb') as f: with pytest.raises(OSError): fits.open(f, ignore_missing_end=True) assert not f.closed with pytest.raises(OSError): fits.open(filename, ignore_missing_end=False) with pytest.raises(OSError): fits.open(filename, ignore_missing_end=True) assert len(ws) == 0
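# --- Illustrative sketch (editor addition, not part of the original test
# suite): the in-memory round trip that test_hdul_fromstring exercises above.
# A FITS file serialized to bytes can be rebuilt with HDUList.fromstring;
# the helper name below is purely illustrative.
def _example_hdulist_fromstring():
    buf = io.BytesIO()
    fits.HDUList([fits.PrimaryHDU(np.arange(10))]).writeto(buf)
    hdul = fits.HDUList.fromstring(buf.getvalue())
    # The reconstructed HDUList carries the same primary data.
    assert (hdul[0].data == np.arange(10)).all()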
430a8b6e68acd1f15fb7fcb832cfa9374b792f0d6bd3837a43228a59036bfc10
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from . import FitsTestCase from ..scripts import fitscheck from ... import fits class TestFitscheck(FitsTestCase): def test_noargs(self): with pytest.raises(SystemExit) as e: fitscheck.main(['-h']) assert e.value.code == 0 def test_missing_file(self, capsys): assert fitscheck.main(['missing.fits']) == 1 stdout, stderr = capsys.readouterr() assert 'No such file or directory' in stderr def test_valid_file(self, capsys): testfile = self.data('checksum.fits') assert fitscheck.main([testfile]) == 0 assert fitscheck.main([testfile, '--compliance']) == 0 assert fitscheck.main([testfile, '-v']) == 0 stdout, stderr = capsys.readouterr() assert 'OK' in stderr def test_remove_checksums(self, capsys): self.copy_file('checksum.fits') testfile = self.temp('checksum.fits') assert fitscheck.main([testfile, '--checksum', 'remove']) == 1 assert fitscheck.main([testfile]) == 1 stdout, stderr = capsys.readouterr() assert 'MISSING' in stderr def test_no_checksums(self, capsys): testfile = self.data('arange.fits') assert fitscheck.main([testfile]) == 1 stdout, stderr = capsys.readouterr() assert 'Checksum not found' in stderr assert fitscheck.main([testfile, '--ignore-missing']) == 0 stdout, stderr = capsys.readouterr() assert stderr == '' def test_overwrite_invalid(self, capsys): """ Tests that invalid checksum or datasum are overwriten when the file is saved. """ reffile = self.temp('ref.fits') with fits.open(self.data('tb.fits')) as hdul: hdul.writeto(reffile, checksum=True) # replace checksums with wrong ones testfile = self.temp('test.fits') with fits.open(self.data('tb.fits')) as hdul: hdul[0].header['DATASUM'] = '1 ' hdul[0].header['CHECKSUM'] = '8UgqATfo7TfoATfo' hdul[1].header['DATASUM'] = '2349680925' hdul[1].header['CHECKSUM'] = '11daD8bX98baA8bU' hdul.writeto(testfile) assert fitscheck.main([testfile]) == 1 stdout, stderr = capsys.readouterr() assert 'BAD' in stderr assert 'Checksum verification failed' in stderr assert fitscheck.main([testfile, '--write', '--force']) == 1 stdout, stderr = capsys.readouterr() assert 'BAD' in stderr # check that the file was fixed assert fitscheck.main([testfile]) == 0
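# --- Illustrative note (editor addition, not part of the original tests):
# fitscheck.main() takes the same argument list as the command-line script,
# so the calls above correspond to invocations such as
#     fitscheck file.fits                    # verify existing checksums
#     fitscheck --checksum remove file.fits  # strip CHECKSUM/DATASUM cards
#     fitscheck --write --force file.fits    # recompute and rewrite bad sums
# where 'file.fits' is a placeholder name.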
4fcee229629f1cb15bbacbf82dce0fc75defa080b86126f6bfd5a92d609d8d2b
# Licensed under a 3-clause BSD style license - see PYFITS.rst import os import shutil import stat import tempfile import time from ... import fits class FitsTestCase: def setup(self): self.data_dir = os.path.join(os.path.dirname(__file__), 'data') self.temp_dir = tempfile.mkdtemp(prefix='fits-test-') # Restore global settings to defaults # TODO: Replace this when there's a better way to in the config API to # force config values to their defaults fits.conf.enable_record_valued_keyword_cards = True fits.conf.extension_name_case_sensitive = False fits.conf.strip_header_whitespace = True fits.conf.use_memmap = True def teardown(self): if hasattr(self, 'temp_dir') and os.path.exists(self.temp_dir): tries = 3 while tries: try: shutil.rmtree(self.temp_dir) break except OSError: # Probably couldn't delete the file because for whatever # reason a handle to it is still open/hasn't been # garbage-collected time.sleep(0.5) tries -= 1 fits.conf.reset('enable_record_valued_keyword_cards') fits.conf.reset('extension_name_case_sensitive') fits.conf.reset('strip_header_whitespace') fits.conf.reset('use_memmap') def copy_file(self, filename): """Copies a backup of a test data file to the temp dir and sets its mode to writeable. """ shutil.copy(self.data(filename), self.temp(filename)) os.chmod(self.temp(filename), stat.S_IREAD | stat.S_IWRITE) def data(self, filename): """Returns the path to a test data file.""" return os.path.join(self.data_dir, filename) def temp(self, filename): """ Returns the full path to a file in the test temp dir.""" return os.path.join(self.temp_dir, filename)
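# --- Illustrative sketch (editor addition, not part of the original file):
# the usage pattern the helpers above support.  Test classes subclass
# FitsTestCase and use data()/temp()/copy_file() to work on a writable copy
# of a bundled data file; 'tb.fits' is one such file, and the class name is
# underscore-prefixed so pytest does not collect it.
class _ExampleUsage(FitsTestCase):
    def round_trip(self):
        self.copy_file('tb.fits')                    # writable copy in temp dir
        with fits.open(self.temp('tb.fits'), mode='update') as hdul:
            hdul[0].header['OBSERVER'] = 'example'   # change is flushed on close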
a9f4a5d0d88b6e6a311858579684cb2135c410af8bce79b65a7e0ed9243a7c73
# Licensed under a 3-clause BSD style license - see PYFITS.rst import os import warnings import pytest import numpy as np from ....io import fits from ....table import Table from .. import printdiff from ....tests.helper import catch_warnings from . import FitsTestCase class TestConvenience(FitsTestCase): def test_resource_warning(self): warnings.simplefilter('always', ResourceWarning) with catch_warnings() as w: data = fits.getdata(self.data('test0.fits')) assert len(w) == 0 with catch_warnings() as w: header = fits.getheader(self.data('test0.fits')) assert len(w) == 0 def test_fileobj_not_closed(self): """ Tests that file-like objects are not closed after being passed to convenience functions. Regression test for https://github.com/astropy/astropy/issues/5063 """ f = open(self.data('test0.fits'), 'rb') data = fits.getdata(f) assert not f.closed f.seek(0) header = fits.getheader(f) assert not f.closed def test_table_to_hdu(self): table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]], names=['a', 'b', 'c'], dtype=['i', 'U1', 'f']) table['a'].unit = 'm/s' table['b'].unit = 'not-a-unit' with catch_warnings() as w: hdu = fits.table_to_hdu(table) assert len(w) == 1 assert str(w[0].message).startswith("'not-a-unit' did not parse as" " fits unit") # Check that TUNITn cards appear in the correct order # (https://github.com/astropy/astropy/pull/5720) assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2') assert isinstance(hdu, fits.BinTableHDU) filename = self.temp('test_table_to_hdu.fits') hdu.writeto(filename, overwrite=True) def test_table_to_hdu_convert_comment_convention(self): """ Regression test for https://github.com/astropy/astropy/issues/6079 """ table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]], names=['a', 'b', 'c'], dtype=['i', 'U1', 'f']) table.meta['comments'] = ['This', 'is', 'a', 'comment'] hdu = fits.table_to_hdu(table) assert hdu.header.get('comment') == ['This', 'is', 'a', 'comment'] with pytest.raises(ValueError): hdu.header.index('comments') def test_table_writeto_header(self): """ Regression test for https://github.com/astropy/astropy/issues/5988 """ data = np.zeros((5, ), dtype=[('x', float), ('y', int)]) h_in = fits.Header() h_in['ANSWER'] = (42.0, 'LTU&E') filename = self.temp('tabhdr42.fits') fits.writeto(filename, data=data, header=h_in, overwrite=True) h_out = fits.getheader(filename, ext=1) assert h_out['ANSWER'] == 42 def test_image_extension_update_header(self): """ Test that _makehdu correctly includes the header. For example in the fits.update convenience function. """ filename = self.temp('twoextension.fits') hdus = [fits.PrimaryHDU(np.zeros((10, 10))), fits.ImageHDU(np.zeros((10, 10)))] fits.HDUList(hdus).writeto(filename) fits.update(filename, np.zeros((10, 10)), header=fits.Header([('WHAT', 100)]), ext=1) h_out = fits.getheader(filename, ext=1) assert h_out['WHAT'] == 100 def test_printdiff(self): """ Test that FITSDiff can run the different inputs without crashing. 
""" # Testing different string input options assert printdiff(self.data('arange.fits'), self.data('blank.fits')) is None assert printdiff(self.data('arange.fits'), self.data('blank.fits'), ext=0) is None assert printdiff(self.data('o4sp040b0_raw.fits'), self.data('o4sp040b0_raw.fits'), extname='sci') is None # This may seem weird, but check printdiff to see, need to test # incorrect second file with pytest.raises(OSError): printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci') # Test HDU object inputs with fits.open(self.data('stddata.fits'), mode='readonly') as in1: with fits.open(self.data('checksum.fits'), mode='readonly') as in2: assert printdiff(in1[0], in2[0]) is None with pytest.raises(ValueError): printdiff(in1[0], in2[0], ext=0) assert printdiff(in1, in2) is None with pytest.raises(NotImplementedError): printdiff(in1, in2, 0) def test_tabledump(self): """ Regression test for https://github.com/astropy/astropy/issues/6937 """ # test without datafile filename = self.data('tb.fits') fits.tabledump(filename) assert os.path.isfile(self.data('tb_1.txt')) os.remove(self.data('tb_1.txt')) # test with datafile fits.tabledump(filename, datafile=self.temp('test_tb.txt')) assert os.path.isfile(self.temp('test_tb.txt'))
7c6dea44608631694e159d0ae81c8a7619b811516ead69d73a68921727f31377
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import signal import gzip import pytest import numpy as np from numpy.testing import assert_equal try: from PIL import Image HAS_PIL = True except ImportError: HAS_PIL = False from ....tests.helper import catch_warnings from .. import util from ..util import ignore_sigint, _rstrip_inplace from .._numpy_hacks import realign_dtype from . import FitsTestCase class TestUtils(FitsTestCase): @pytest.mark.skipif("sys.platform.startswith('win')") def test_ignore_sigint(self): @ignore_sigint def test(): with catch_warnings(UserWarning) as w: pid = os.getpid() os.kill(pid, signal.SIGINT) # One more time, for good measure os.kill(pid, signal.SIGINT) assert len(w) == 2 assert (str(w[0].message) == 'KeyboardInterrupt ignored until test is complete!') pytest.raises(KeyboardInterrupt, test) def test_realign_dtype(self): """ Tests a few corner-cases for the realign_dtype hack. These are unlikely to come in practice given how this is currently used in astropy.io.fits, but nonetheless tests for bugs that were present in earlier versions of the function. """ dt = np.dtype([('a', np.int32), ('b', np.int16)]) dt2 = realign_dtype(dt, [0, 0]) assert dt2.itemsize == 4 dt2 = realign_dtype(dt, [0, 1]) assert dt2.itemsize == 4 dt2 = realign_dtype(dt, [1, 0]) assert dt2.itemsize == 5 dt = np.dtype([('a', np.float64), ('b', np.int8), ('c', np.int8)]) dt2 = realign_dtype(dt, [0, 0, 0]) assert dt2.itemsize == 8 dt2 = realign_dtype(dt, [0, 0, 1]) assert dt2.itemsize == 8 dt2 = realign_dtype(dt, [0, 0, 27]) assert dt2.itemsize == 28 class TestUtilMode(FitsTestCase): """ The high-level tests are partially covered by test_core.TestConvenienceFunctions.test_fileobj_mode_guessing but added some low-level tests as well. """ def test_mode_strings(self): # A string signals that the file should be opened so the function # should return None, because it's simply not opened yet. assert util.fileobj_mode('tmp1.fits') is None @pytest.mark.skipif("not HAS_PIL") def test_mode_pil_image(self): img = np.random.randint(0, 255, (5, 5, 3)).astype(np.uint8) result = Image.fromarray(img) result.save(self.temp('test_simple.jpg')) # PIL doesn't support append mode. So it will allways use binary read. with Image.open(self.temp('test_simple.jpg')) as fileobj: assert util.fileobj_mode(fileobj) == 'rb' def test_mode_gzip(self): # Open a gzip in every possible (gzip is binary or "touch" only) way # and check if the mode was correctly identified. # The lists consist of tuples: filenumber, given mode, identified mode # The filenumber must be given because read expects the file to exist # and x expects it to NOT exist. num_mode_resmode = [(0, 'a', 'ab'), (0, 'ab', 'ab'), (0, 'w', 'wb'), (0, 'wb', 'wb'), (1, 'x', 'xb'), (1, 'r', 'rb'), (1, 'rb', 'rb')] for num, mode, res in num_mode_resmode: filename = self.temp('test{0}.gz'.format(num)) with gzip.GzipFile(filename, mode) as fileobj: assert util.fileobj_mode(fileobj) == res def test_mode_normal_buffering(self): # Use the python IO with buffering parameter. Binary mode only: # see "test_mode_gzip" for explanation of tuple meanings. num_mode_resmode = [(0, 'ab', 'ab'), (0, 'wb', 'wb'), (1, 'xb', 'xb'), (1, 'rb', 'rb')] for num, mode, res in num_mode_resmode: filename = self.temp('test1{0}.dat'.format(num)) with open(filename, mode, buffering=0) as fileobj: assert util.fileobj_mode(fileobj) == res def test_mode_normal_no_buffering(self): # Python IO without buffering # see "test_mode_gzip" for explanation of tuple meanings. 
num_mode_resmode = [(0, 'a', 'a'), (0, 'ab', 'ab'), (0, 'w', 'w'), (0, 'wb', 'wb'), (1, 'x', 'x'), (1, 'r', 'r'), (1, 'rb', 'rb')] for num, mode, res in num_mode_resmode: filename = self.temp('test2{0}.dat'.format(num)) with open(filename, mode) as fileobj: assert util.fileobj_mode(fileobj) == res def test_mode_normalization(self): # Use the normal python IO in append mode with all possible permutation # of the "mode" letters. # Tuple gives a file name suffix, the given mode and the functions # return. The filenumber is only for consistency with the other # test functions. Append can deal with existing and not existing files. for num, mode, res in [(0, 'a', 'a'), (0, 'a+', 'a+'), (0, 'ab', 'ab'), (0, 'a+b', 'ab+'), (0, 'ab+', 'ab+')]: filename = self.temp('test3{0}.dat'.format(num)) with open(filename, mode) as fileobj: assert util.fileobj_mode(fileobj) == res def test_rstrip_inplace(): # Incorrect type s = np.array([1, 2, 3]) with pytest.raises(TypeError) as exc: _rstrip_inplace(s) assert exc.value.args[0] == 'This function can only be used on string arrays' # Bytes array s = np.array(['a ', ' b', ' c c '], dtype='S6') _rstrip_inplace(s) assert_equal(s, np.array(['a', ' b', ' c c'], dtype='S6')) # Unicode array s = np.array(['a ', ' b', ' c c '], dtype='U6') _rstrip_inplace(s) assert_equal(s, np.array(['a', ' b', ' c c'], dtype='U6')) # 2-dimensional array s = np.array([['a ', ' b'], [' c c ', ' a ']], dtype='S6') _rstrip_inplace(s) assert_equal(s, np.array([['a', ' b'], [' c c', ' a']], dtype='S6')) # 3-dimensional array s = np.repeat(' a a ', 24).reshape((2, 3, 4)) _rstrip_inplace(s) assert_equal(s, ' a a') # 3-dimensional non-contiguous array s = np.repeat(' a a ', 1000).reshape((10, 10, 10))[:2, :3, :4] _rstrip_inplace(s) assert_equal(s, ' a a')
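# --- Illustrative sketch (editor addition, not part of the original tests):
# what _rstrip_inplace does in the simplest case -- trailing (but not leading)
# blanks are stripped from every element of a string array, in place.
def _example_rstrip_inplace():
    s = np.array([' x ', 'y  '], dtype='S4')
    _rstrip_inplace(s)
    assert_equal(s, np.array([' x', 'y'], dtype='S4'))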
77e0fcb21f5431418f6a4fbfc4d92131235718729df5f9d3a631a2ce243d1db0
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import pytest import numpy as np from ..column import Column from ..diff import (FITSDiff, HeaderDiff, ImageDataDiff, TableDataDiff, HDUDiff, report_diff_values) from ..hdu import HDUList, PrimaryHDU, ImageHDU from ..hdu.table import BinTableHDU from ..header import Header from ....tests.helper import catch_warnings from ....utils.exceptions import AstropyDeprecationWarning from ....io import fits from . import FitsTestCase class TestDiff(FitsTestCase): def test_identical_headers(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() assert HeaderDiff(ha, hb).identical assert HeaderDiff(ha.tostring(), hb.tostring()).identical with pytest.raises(TypeError): HeaderDiff(1, 2) def test_slightly_different_headers(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 assert not HeaderDiff(ha, hb).identical def test_common_keywords(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 hb['D'] = (5, 'Comment') assert HeaderDiff(ha, hb).common_keywords == ['A', 'B', 'C'] def test_different_keyword_count(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() del hb['B'] diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_count == (3, 2) # But make sure the common keywords are at least correct assert diff.common_keywords == ['A', 'C'] def test_different_keywords(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 hb['D'] = (5, 'Comment') ha['E'] = (6, 'Comment') ha['F'] = (7, 'Comment') diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keywords == (['E', 'F'], ['D']) def test_different_keyword_values(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_values == {'C': [(3, 4)]} def test_different_keyword_comments(self): ha = Header([('A', 1), ('B', 2), ('C', 3, 'comment 1')]) hb = ha.copy() hb.comments['C'] = 'comment 2' diff = HeaderDiff(ha, hb) assert not diff.identical assert (diff.diff_keyword_comments == {'C': [('comment 1', 'comment 2')]}) def test_different_keyword_values_with_duplicate(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() ha.append(('C', 4)) hb.append(('C', 5)) diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_values == {'C': [None, (4, 5)]} def test_asymmetric_duplicate_keywords(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() ha.append(('A', 2, 'comment 1')) ha.append(('A', 3, 'comment 2')) hb.append(('B', 4, 'comment 3')) hb.append(('C', 5, 'comment 4')) diff = HeaderDiff(ha, hb) assert not diff.identical assert diff.diff_keyword_values == {} assert (diff.diff_duplicate_keywords == {'A': (3, 1), 'B': (1, 2), 'C': (1, 2)}) report = diff.report() assert ("Inconsistent duplicates of keyword 'A' :\n" " Occurs 3 time(s) in a, 1 times in (b)") in report def test_floating_point_rtol(self): ha = Header([('A', 1), ('B', 2.00001), ('C', 3.000001)]) hb = ha.copy() hb['B'] = 2.00002 hb['C'] = 3.000002 diff = HeaderDiff(ha, hb) assert not diff.identical assert (diff.diff_keyword_values == {'B': [(2.00001, 2.00002)], 'C': [(3.000001, 3.000002)]}) diff = HeaderDiff(ha, hb, rtol=1e-6) assert not diff.identical assert diff.diff_keyword_values == {'B': [(2.00001, 2.00002)]} diff = HeaderDiff(ha, hb, rtol=1e-5) assert diff.identical def test_floating_point_atol(self): ha = Header([('A', 1), ('B', 1.0), ('C', 0.0)]) hb = 
ha.copy() hb['B'] = 1.00001 hb['C'] = 0.000001 diff = HeaderDiff(ha, hb, rtol=1e-6) assert not diff.identical assert (diff.diff_keyword_values == {'B': [(1.0, 1.00001)], 'C': [(0.0, 0.000001)]}) diff = HeaderDiff(ha, hb, rtol=1e-5) assert not diff.identical assert (diff.diff_keyword_values == {'C': [(0.0, 0.000001)]}) diff = HeaderDiff(ha, hb, atol=1e-6) assert not diff.identical assert (diff.diff_keyword_values == {'B': [(1.0, 1.00001)]}) diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality assert not diff.identical assert (diff.diff_keyword_values == {'B': [(1.0, 1.00001)]}) diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5) assert diff.identical diff = HeaderDiff(ha, hb, atol=1.1e-5) assert diff.identical diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6) assert not diff.identical def test_deprecation_tolerance(self): """Verify uses of tolerance and rtol. This test should be removed in the next astropy version.""" ha = Header([('B', 1.0), ('C', 0.1)]) hb = ha.copy() hb['B'] = 1.00001 hb['C'] = 0.100001 with catch_warnings(AstropyDeprecationWarning) as warning_lines: diff = HeaderDiff(ha, hb, tolerance=1e-6) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"tolerance" was ' 'deprecated in version 2.0 and will be removed in a ' 'future version. Use argument "rtol" instead.') assert (diff.diff_keyword_values == {'C': [(0.1, 0.100001)], 'B': [(1.0, 1.00001)]}) assert not diff.identical with catch_warnings(AstropyDeprecationWarning) as warning_lines: # `rtol` is always ignored when `tolerance` is provided diff = HeaderDiff(ha, hb, rtol=1e-6, tolerance=1e-5) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"tolerance" was ' 'deprecated in version 2.0 and will be removed in a ' 'future version. Use argument "rtol" instead.') assert diff.identical def test_ignore_blanks(self): with fits.conf.set_temp('strip_header_whitespace', False): ha = Header([('A', 1), ('B', 2), ('C', 'A ')]) hb = ha.copy() hb['C'] = 'A' assert ha['C'] != hb['C'] diff = HeaderDiff(ha, hb) # Trailing blanks are ignored by default assert diff.identical assert diff.diff_keyword_values == {} # Don't ignore blanks diff = HeaderDiff(ha, hb, ignore_blanks=False) assert not diff.identical assert diff.diff_keyword_values == {'C': [('A ', 'A')]} def test_ignore_blank_cards(self): """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152 Ignore blank cards. 
""" ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = Header([('A', 1), ('', ''), ('B', 2), ('', ''), ('C', 3)]) hc = ha.copy() hc.append() hc.append() # We now have a header with interleaved blanks, and a header with end # blanks, both of which should ignore the blanks assert HeaderDiff(ha, hb).identical assert HeaderDiff(ha, hc).identical assert HeaderDiff(hb, hc).identical assert not HeaderDiff(ha, hb, ignore_blank_cards=False).identical assert not HeaderDiff(ha, hc, ignore_blank_cards=False).identical # Both hb and hc have the same number of blank cards; since order is # currently ignored, these should still be identical even if blank # cards are not ignored assert HeaderDiff(hb, hc, ignore_blank_cards=False).identical hc.append() # But now there are different numbers of blanks, so they should not be # ignored: assert not HeaderDiff(hb, hc, ignore_blank_cards=False).identical def test_ignore_keyword_values(self): ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['B'] = 4 hb['C'] = 5 diff = HeaderDiff(ha, hb, ignore_keywords=['*']) assert diff.identical diff = HeaderDiff(ha, hb, ignore_keywords=['B']) assert not diff.identical assert diff.diff_keyword_values == {'C': [(3, 5)]} report = diff.report() assert 'Keyword B has different values' not in report assert 'Keyword C has different values' in report # Test case-insensitivity diff = HeaderDiff(ha, hb, ignore_keywords=['b']) assert not diff.identical assert diff.diff_keyword_values == {'C': [(3, 5)]} def test_ignore_keyword_comments(self): ha = Header([('A', 1, 'A'), ('B', 2, 'B'), ('C', 3, 'C')]) hb = ha.copy() hb.comments['B'] = 'D' hb.comments['C'] = 'E' diff = HeaderDiff(ha, hb, ignore_comments=['*']) assert diff.identical diff = HeaderDiff(ha, hb, ignore_comments=['B']) assert not diff.identical assert diff.diff_keyword_comments == {'C': [('C', 'E')]} report = diff.report() assert 'Keyword B has different comments' not in report assert 'Keyword C has different comments' in report # Test case-insensitivity diff = HeaderDiff(ha, hb, ignore_comments=['b']) assert not diff.identical assert diff.diff_keyword_comments == {'C': [('C', 'E')]} def test_trivial_identical_images(self): ia = np.arange(100).reshape(10, 10) ib = np.arange(100).reshape(10, 10) diff = ImageDataDiff(ia, ib) assert diff.identical assert diff.diff_total == 0 def test_identical_within_relative_tolerance(self): ia = np.ones((10, 10)) - 0.00001 ib = np.ones((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-4) assert diff.identical assert diff.diff_total == 0 def test_identical_within_absolute_tolerance(self): ia = np.zeros((10, 10)) - 0.00001 ib = np.zeros((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-4) assert not diff.identical assert diff.diff_total == 100 diff = ImageDataDiff(ia, ib, atol=1.0e-4) assert diff.identical assert diff.diff_total == 0 def test_identical_within_rtol_and_atol(self): ia = np.zeros((10, 10)) - 0.00001 ib = np.zeros((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5) assert diff.identical assert diff.diff_total == 0 def test_not_identical_within_rtol_and_atol(self): ia = np.zeros((10, 10)) - 0.00001 ib = np.zeros((10, 10)) - 0.00002 diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6) assert not diff.identical assert diff.diff_total == 100 def test_identical_comp_image_hdus(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189 For this test we mostly just care that comparing to compressed images does not crash, and returns the correct results. 
Two compressed images will be considered identical if the decompressed data is the same. Obviously we test whether or not the same compression was used by looking for (or ignoring) header differences. """ data = np.arange(100.0).reshape(10, 10) hdu = fits.CompImageHDU(data=data) hdu.writeto(self.temp('test.fits')) hdula = fits.open(self.temp('test.fits')) hdulb = fits.open(self.temp('test.fits')) diff = FITSDiff(hdula, hdulb) assert diff.identical def test_different_dimensions(self): ia = np.arange(100).reshape(10, 10) ib = np.arange(100) - 1 # Although ib could be reshaped into the same dimensions, for now the # data is not compared anyways diff = ImageDataDiff(ia, ib) assert not diff.identical assert diff.diff_dimensions == ((10, 10), (100,)) assert diff.diff_total == 0 report = diff.report() assert 'Data dimensions differ' in report assert 'a: 10 x 10' in report assert 'b: 100' in report assert 'No further data comparison performed.' def test_different_pixels(self): ia = np.arange(100).reshape(10, 10) ib = np.arange(100).reshape(10, 10) ib[0, 0] = 10 ib[5, 5] = 20 diff = ImageDataDiff(ia, ib) assert not diff.identical assert diff.diff_dimensions == () assert diff.diff_total == 2 assert diff.diff_ratio == 0.02 assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))] def test_identical_tables(self): c1 = Column('A', format='L', array=[True, False]) c2 = Column('B', format='X', array=[[0], [1]]) c3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [4, 5, 6, 7]]) c4 = Column('D', format='J', bscale=2.0, array=[0, 1]) c5 = Column('E', format='A3', array=['abc', 'def']) c6 = Column('F', format='E', unit='m', array=[0.0, 1.0]) c7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0]) c8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j]) c9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j]) c10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]]) columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10] ta = BinTableHDU.from_columns(columns) tb = BinTableHDU.from_columns([c.copy() for c in columns]) diff = TableDataDiff(ta.data, tb.data) assert diff.identical assert len(diff.common_columns) == 10 assert diff.common_column_names == set('abcdefghij') assert diff.diff_ratio == 0 assert diff.diff_total == 0 def test_diff_empty_tables(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178 Ensure that diffing tables containing empty data doesn't crash. 
""" c1 = Column('D', format='J') c2 = Column('E', format='J') thdu = BinTableHDU.from_columns([c1, c2], nrows=0) hdula = fits.HDUList([thdu]) hdulb = fits.HDUList([thdu]) diff = FITSDiff(hdula, hdulb) assert diff.identical def test_ignore_table_fields(self): c1 = Column('A', format='L', array=[True, False]) c2 = Column('B', format='X', array=[[0], [1]]) c3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [4, 5, 6, 7]]) c4 = Column('B', format='X', array=[[1], [0]]) c5 = Column('C', format='4I', dim='(2, 2)', array=[[1, 2, 3, 4], [5, 6, 7, 8]]) ta = BinTableHDU.from_columns([c1, c2, c3]) tb = BinTableHDU.from_columns([c1, c4, c5]) diff = TableDataDiff(ta.data, tb.data, ignore_fields=['B', 'C']) assert diff.identical # The only common column should be c1 assert len(diff.common_columns) == 1 assert diff.common_column_names == {'a'} assert diff.diff_ratio == 0 assert diff.diff_total == 0 def test_different_table_field_names(self): ca = Column('A', format='L', array=[True, False]) cb = Column('B', format='L', array=[True, False]) cc = Column('C', format='L', array=[True, False]) ta = BinTableHDU.from_columns([ca, cb]) tb = BinTableHDU.from_columns([ca, cc]) diff = TableDataDiff(ta.data, tb.data) assert not diff.identical assert len(diff.common_columns) == 1 assert diff.common_column_names == {'a'} assert diff.diff_column_names == (['B'], ['C']) assert diff.diff_ratio == 0 assert diff.diff_total == 0 report = diff.report() assert 'Extra column B of format L in a' in report assert 'Extra column C of format L in b' in report def test_different_table_field_counts(self): """ Test tables with some common columns, but different number of columns overall. """ ca = Column('A', format='L', array=[True, False]) cb = Column('B', format='L', array=[True, False]) cc = Column('C', format='L', array=[True, False]) ta = BinTableHDU.from_columns([cb]) tb = BinTableHDU.from_columns([ca, cb, cc]) diff = TableDataDiff(ta.data, tb.data) assert not diff.identical assert diff.diff_column_count == (1, 3) assert len(diff.common_columns) == 1 assert diff.common_column_names == {'b'} assert diff.diff_column_names == ([], ['A', 'C']) assert diff.diff_ratio == 0 assert diff.diff_total == 0 report = diff.report() assert ' Tables have different number of columns:' in report assert ' a: 1\n b: 3' in report def test_different_table_rows(self): """ Test tables that are otherwise identical but one has more rows than the other. """ ca1 = Column('A', format='L', array=[True, False]) cb1 = Column('B', format='L', array=[True, False]) ca2 = Column('A', format='L', array=[True, False, True]) cb2 = Column('B', format='L', array=[True, False, True]) ta = BinTableHDU.from_columns([ca1, cb1]) tb = BinTableHDU.from_columns([ca2, cb2]) diff = TableDataDiff(ta.data, tb.data) assert not diff.identical assert diff.diff_column_count == () assert len(diff.common_columns) == 2 assert diff.diff_rows == (2, 3) assert diff.diff_values == [] report = diff.report() assert 'Table rows differ' in report assert 'a: 2' in report assert 'b: 3' in report assert 'No further data comparison performed.' def test_different_table_data(self): """ Test diffing table data on columns of several different data formats and dimensions. 
""" ca1 = Column('A', format='L', array=[True, False]) ca2 = Column('B', format='X', array=[[0], [1]]) ca3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [4, 5, 6, 7]]) ca4 = Column('D', format='J', bscale=2.0, array=[0.0, 2.0]) ca5 = Column('E', format='A3', array=['abc', 'def']) ca6 = Column('F', format='E', unit='m', array=[0.0, 1.0]) ca7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0]) ca8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j]) ca9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j]) ca10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]]) cb1 = Column('A', format='L', array=[False, False]) cb2 = Column('B', format='X', array=[[0], [0]]) cb3 = Column('C', format='4I', dim='(2, 2)', array=[[0, 1, 2, 3], [5, 6, 7, 8]]) cb4 = Column('D', format='J', bscale=2.0, array=[2.0, 2.0]) cb5 = Column('E', format='A3', array=['abc', 'ghi']) cb6 = Column('F', format='E', unit='m', array=[1.0, 2.0]) cb7 = Column('G', format='D', bzero=-0.1, array=[2.0, 3.0]) cb8 = Column('H', format='C', array=[1.0+1.0j, 2.0+3.0j]) cb9 = Column('I', format='M', array=[5.0+5.0j, 6.0+7.0j]) cb10 = Column('J', format='PI(2)', array=[[1, 2], [3, 4]]) ta = BinTableHDU.from_columns([ca1, ca2, ca3, ca4, ca5, ca6, ca7, ca8, ca9, ca10]) tb = BinTableHDU.from_columns([cb1, cb2, cb3, cb4, cb5, cb6, cb7, cb8, cb9, cb10]) diff = TableDataDiff(ta.data, tb.data, numdiffs=20) assert not diff.identical # The column definitions are the same, but not the column values assert diff.diff_columns == () assert diff.diff_values[0] == (('A', 0), (True, False)) assert diff.diff_values[1] == (('B', 1), ([1], [0])) assert diff.diff_values[2][0] == ('C', 1) assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all() assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all() assert diff.diff_values[3] == (('D', 0), (0, 2.0)) assert diff.diff_values[4] == (('E', 1), ('def', 'ghi')) assert diff.diff_values[5] == (('F', 0), (0.0, 1.0)) assert diff.diff_values[6] == (('F', 1), (1.0, 2.0)) assert diff.diff_values[7] == (('G', 0), (0.0, 2.0)) assert diff.diff_values[8] == (('G', 1), (1.0, 3.0)) assert diff.diff_values[9] == (('H', 0), (0.0+1.0j, 1.0+1.0j)) assert diff.diff_values[10] == (('I', 0), (4.0+5.0j, 5.0+5.0j)) assert diff.diff_values[11][0] == ('J', 0) assert (diff.diff_values[11][1][0] == [0, 1]).all() assert (diff.diff_values[11][1][1] == [1, 2]).all() assert diff.diff_values[12][0] == ('J', 1) assert (diff.diff_values[12][1][0] == [2, 3]).all() assert (diff.diff_values[12][1][1] == [3, 4]).all() assert diff.diff_total == 13 assert diff.diff_ratio == 0.65 report = diff.report() assert ('Column A data differs in row 0:\n' ' a> True\n' ' b> False') in report assert ('...and at 13 more indices.\n' ' Column D data differs in row 0:') in report assert ('13 different table data element(s) found (65.00% different)' in report) assert report.count('more indices') == 1 def test_identical_files_basic(self): """Test identicality of two simple, extensionless files.""" a = np.arange(100).reshape(10, 10) hdu = PrimaryHDU(data=a) hdu.writeto(self.temp('testa.fits')) hdu.writeto(self.temp('testb.fits')) diff = FITSDiff(self.temp('testa.fits'), self.temp('testb.fits')) assert diff.identical report = diff.report() # Primary HDUs should contain no differences assert 'Primary HDU' not in report assert 'Extension HDU' not in report assert 'No differences found.' in report a = np.arange(10) ehdu = ImageHDU(data=a) diff = HDUDiff(ehdu, ehdu) assert diff.identical report = diff.report() assert 'No differences found.' 
in report def test_partially_identical_files1(self): """ Test files that have some identical HDUs but a different extension count. """ a = np.arange(100).reshape(10, 10) phdu = PrimaryHDU(data=a) ehdu = ImageHDU(data=a) hdula = HDUList([phdu, ehdu]) hdulb = HDUList([phdu, ehdu, ehdu]) diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdu_count == (2, 3) # diff_hdus should be empty, since the third extension in hdulb # has nothing to compare against assert diff.diff_hdus == [] report = diff.report() assert 'Files contain different numbers of HDUs' in report assert 'a: 2\n b: 3' in report assert 'No differences found between common HDUs' in report def test_partially_identical_files2(self): """ Test files that have some identical HDUs but one different HDU. """ a = np.arange(100).reshape(10, 10) phdu = PrimaryHDU(data=a) ehdu = ImageHDU(data=a) ehdu2 = ImageHDU(data=(a + 1)) hdula = HDUList([phdu, ehdu, ehdu]) hdulb = HDUList([phdu, ehdu2, ehdu]) diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdu_count == () assert len(diff.diff_hdus) == 1 assert diff.diff_hdus[0][0] == 1 hdudiff = diff.diff_hdus[0][1] assert not hdudiff.identical assert hdudiff.diff_extnames == () assert hdudiff.diff_extvers == () assert hdudiff.diff_extension_types == () assert hdudiff.diff_headers.identical assert hdudiff.diff_data is not None datadiff = hdudiff.diff_data assert isinstance(datadiff, ImageDataDiff) assert not datadiff.identical assert datadiff.diff_dimensions == () assert (datadiff.diff_pixels == [((0, y), (y, y + 1)) for y in range(10)]) assert datadiff.diff_ratio == 1.0 assert datadiff.diff_total == 100 report = diff.report() # Primary HDU and 2nd extension HDU should have no differences assert 'Primary HDU' not in report assert 'Extension HDU 2' not in report assert 'Extension HDU 1' in report assert 'Headers contain differences' not in report assert 'Data contains differences' in report for y in range(10): assert 'Data differs at [{}, 1]'.format(y + 1) in report assert '100 different pixels found (100.00% different).' in report def test_partially_identical_files3(self): """ Test files that have some identical HDUs but a different extension name. """ phdu = PrimaryHDU() ehdu = ImageHDU(name='FOO') hdula = HDUList([phdu, ehdu]) ehdu = BinTableHDU(name='BAR') ehdu.header['EXTVER'] = 2 ehdu.header['EXTLEVEL'] = 3 hdulb = HDUList([phdu, ehdu]) diff = FITSDiff(hdula, hdulb) assert not diff.identical assert diff.diff_hdus[0][0] == 1 hdu_diff = diff.diff_hdus[0][1] assert hdu_diff.diff_extension_types == ('IMAGE', 'BINTABLE') assert hdu_diff.diff_extnames == ('FOO', 'BAR') assert hdu_diff.diff_extvers == (1, 2) assert hdu_diff.diff_extlevels == (1, 3) report = diff.report() assert 'Extension types differ' in report assert 'a: IMAGE\n b: BINTABLE' in report assert 'Extension names differ' in report assert 'a: FOO\n b: BAR' in report assert 'Extension versions differ' in report assert 'a: 1\n b: 2' in report assert 'Extension levels differ' in report assert 'a: 1\n b: 2' in report def test_diff_nans(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204""" # First test some arrays that should be equivalent.... 
arr = np.empty((10, 10), dtype=np.float64) arr[:5] = 1.0 arr[5:] = np.nan arr2 = arr.copy() table = np.rec.array([(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)], names=['cola', 'colb']).view(fits.FITS_rec) table2 = table.copy() assert ImageDataDiff(arr, arr2).identical assert TableDataDiff(table, table2).identical # Now let's introduce some differences, where there are nans and where # there are not nans arr2[0][0] = 2.0 arr2[5][0] = 2.0 table2[0][0] = 2.0 table2[1][1] = 2.0 diff = ImageDataDiff(arr, arr2) assert not diff.identical assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0)) assert diff.diff_pixels[1][0] == (5, 0) assert np.isnan(diff.diff_pixels[1][1][0]) assert diff.diff_pixels[1][1][1] == 2.0 diff = TableDataDiff(table, table2) assert not diff.identical assert diff.diff_values[0] == (('cola', 0), (1.0, 2.0)) assert diff.diff_values[1][0] == ('colb', 1) assert np.isnan(diff.diff_values[1][1][0]) assert diff.diff_values[1][1][1] == 2.0 def test_diff_types(self): """ Regression test for https://github.com/astropy/astropy/issues/4122 """ f = io.StringIO() a = 1.0 b = '1.0' report_diff_values(f, a, b) out = f.getvalue() assert out.lstrip('u') == " (float) a> 1.0\n (str) b> '1.0'\n ? + +\n" def test_float_comparison(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/21 """ f = io.StringIO() a = np.float32(0.029751372) b = np.float32(0.029751368) report_diff_values(f, a, b) out = f.getvalue() # This test doesn't care about what the exact output is, just that it # did show a difference in their text representations assert 'a>' in out assert 'b>' in out def test_file_output_from_path_string(self): outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) report_as_string = diffobj.report() assert open(outpath).read() == report_as_string def test_file_output_overwrite_safety(self): outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) with pytest.raises(OSError): diffobj.report(fileobj=outpath) def test_file_output_overwrite_success(self): outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) report_as_string = diffobj.report() diffobj.report(fileobj=outpath, overwrite=True) assert open(outpath).read() == report_as_string, ("overwritten output " "file is not identical to report string") def test_file_output_overwrite_vs_clobber(self): """Verify uses of clobber and overwrite.""" outpath = self.temp('diff_output.txt') ha = Header([('A', 1), ('B', 2), ('C', 3)]) hb = ha.copy() hb['C'] = 4 diffobj = HeaderDiff(ha, hb) diffobj.report(fileobj=outpath) report_as_string = diffobj.report() with catch_warnings(AstropyDeprecationWarning) as warning_lines: diffobj.report(fileobj=outpath, clobber=True) assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"clobber" was ' 'deprecated in version 2.0 and will be removed in a ' 'future version. Use argument "overwrite" instead.')
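# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the upstream test
# suite): a minimal end-to-end use of FITSDiff and report(), assuming only
# the public astropy.io.fits API exercised by the tests above.
def _example_fitsdiff_usage():
    import numpy as np
    from astropy.io import fits

    data = np.arange(100).reshape(10, 10)
    hdula = fits.HDUList([fits.PrimaryHDU(data=data)])
    hdulb = fits.HDUList([fits.PrimaryHDU(data=data + 1)])

    # numdiffs limits how many differing pixel/table values are reported
    diff = fits.FITSDiff(hdula, hdulb, numdiffs=5)
    if not diff.identical:
        # With no fileobj, report() returns the report as a string; pass a
        # file path and overwrite=True to (re)write it to disk instead.
        return diff.report()
# ---------------------------------------------------------------------------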
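# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the upstream test
# suite): the basic Header/Card behaviours the header tests below rely on,
# using only the public astropy.io.fits API.
def _example_header_usage():
    from astropy.io import fits

    header = fits.Header([('A', 1, 'first card'), ('B', 2)])
    header['C'] = (3, 'value plus comment')  # a 2-tuple sets value and comment
    header.add_history('processed')          # commentary (HISTORY) card
    assert header['A'] == 1
    assert header.comments['C'] == 'value plus comment'
    return str(header.cards['A'])            # 80-character card image
# ---------------------------------------------------------------------------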
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see PYFITS.rst import copy import warnings import collections from io import StringIO, BytesIO import pytest import numpy as np from ....io import fits from ....io.fits.verify import VerifyWarning from ....tests.helper import catch_warnings, ignore_warnings from . import FitsTestCase from ..card import _pad from ..header import _pad_length from ..util import encode_ascii def test_shallow_copy(): """Make sure that operations on a shallow copy do not alter the original. #4990.""" original_header = fits.Header([('a', 1), ('b', 1)]) copied_header = copy.copy(original_header) # Modifying the original dict should not alter the copy original_header['c'] = 100 assert 'c' not in copied_header # and changing the copy should not change the original. copied_header['a'] = 0 assert original_header['a'] == 1 def test_init_with_header(): """Make sure that creating a Header from another Header makes a copy if copy is True.""" original_header = fits.Header([('a', 10)]) new_header = fits.Header(original_header, copy=True) original_header['a'] = 20 assert new_header['a'] == 10 new_header['a'] = 0 assert original_header['a'] == 20 def test_init_with_dict(): dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15} h1 = fits.Header(dict1) for i in dict1: assert dict1[i] == h1[i] def test_init_with_ordereddict(): # Create a list of tuples. Each tuple consisting of a letter and the number list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')] # Create an ordered dictionary and a header from this dictionary dict1 = collections.OrderedDict(list1) h1 = fits.Header(dict1) # Check that the order is preserved of the initial list assert all(h1[val] == list1[i][1] for i, val in enumerate(h1)) class TestHeaderFunctions(FitsTestCase): """Test Header and Card objects.""" def test_rename_keyword(self): """Test renaming keyword with rename_keyword.""" header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')]) header.rename_keyword('A', 'B') assert 'A' not in header assert 'B' in header assert header[0] == 'B' assert header['B'] == 'B' assert header.comments['B'] == 'C' def test_card_constructor_default_args(self): """Test Card constructor with default argument values.""" c = fits.Card() assert '' == c.keyword def test_string_value_card(self): """Test Card constructor with string value""" c = fits.Card('abc', '<8 ch') assert str(c) == _pad("ABC = '<8 ch '") c = fits.Card('nullstr', '') assert str(c) == _pad("NULLSTR = ''") def test_boolean_value_card(self): """Test Card constructor with boolean value""" c = fits.Card("abc", True) assert str(c) == _pad("ABC = T") c = fits.Card.fromstring('ABC = F') assert c.value is False def test_long_integer_value_card(self): """Test Card constructor with long integer value""" c = fits.Card('long_int', -467374636747637647347374734737437) assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437") def test_floating_point_value_card(self): """Test Card constructor with floating point value""" c = fits.Card('floatnum', -467374636747637647347374734737437.) 
if (str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad("FLOATNUM= -4.6737463674763E+032")): assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32") def test_complex_value_card(self): """Test Card constructor with complex value""" c = fits.Card('abc', (1.2345377437887837487e88 + 6324767364763746367e-33j)) f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)") f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)") f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)") if str(c) != f1 and str(c) != f2: assert str(c) == f3 def test_card_image_constructed_too_long(self): """Test that over-long cards truncate the comment""" # card image constructed from key/value/comment is too long # (non-string value) with ignore_warnings(): c = fits.Card('abc', 9, 'abcde' * 20) assert (str(c) == "ABC = 9 " "/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab") c = fits.Card('abc', 'a' * 68, 'abcdefg') assert str(c) == "ABC = '{}'".format('a' * 68) def test_constructor_filter_illegal_data_structures(self): """Test that Card constructor raises exceptions on bad arguments""" pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)}) pytest.raises(ValueError, fits.Card, 'key', [], 'comment') def test_keyword_too_long(self): """Test that long Card keywords are allowed, but with a warning""" with catch_warnings(): warnings.simplefilter('error') pytest.raises(UserWarning, fits.Card, 'abcdefghi', 'long') def test_illegal_characters_in_key(self): """ Test that Card constructor allows illegal characters in the keyword, but creates a HIERARCH card. """ # This test used to check that a ValueError was raised, because a # keyword like 'abc+' was simply not allowed. Now it should create a # HIERARCH card. with catch_warnings() as w: c = fits.Card('abc+', 9) assert len(w) == 1 assert c.image == _pad('HIERARCH abc+ = 9') def test_add_commentary(self): header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1), ('HISTORY', 2), ('HISTORY', 3), ('', '', ''), ('', '', '')]) header.add_history(4) # One of the blanks should get used, so the length shouldn't change assert len(header) == 6 assert header.cards[4].value == 4 assert header['HISTORY'] == [1, 2, 3, 4] header.add_history(0, after='A') assert len(header) == 6 assert header.cards[1].value == 0 assert header['HISTORY'] == [0, 1, 2, 3, 4] header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3), ('', '', ''), ('', '', '')]) header.add_blank(4) # This time a new blank should be added, and the existing blanks don't # get used... 
(though this is really kinda sketchy--there's a # distinction between truly blank cards, and cards with blank keywords # that isn't currently made int he code) assert len(header) == 7 assert header.cards[6].value == 4 assert header[''] == [1, 2, 3, '', '', 4] header.add_blank(0, after='A') assert len(header) == 8 assert header.cards[1].value == 0 assert header[''] == [0, 1, 2, 3, '', '', 4] def test_update(self): class FakeHeader(list): def keys(self): return [l[0] for l in self] def __getitem__(self, key): return next(l[1:] for l in self if l[0] == key) header = fits.Header() header.update({'FOO': ('BAR', 'BAZ')}) header.update(FakeHeader([('A', 1), ('B', 2, 'comment')])) assert set(header.keys()) == {'FOO', 'A', 'B'} assert header.comments['B'] == 'comment' header.update(NAXIS1=100, NAXIS2=100) assert set(header.keys()) == {'FOO', 'A', 'B', 'NAXIS1', 'NAXIS2'} assert set(header.values()) == {'BAR', 1, 2, 100, 100} def test_update_comment(self): hdul = fits.open(self.data('arange.fits')) hdul[0].header.update({'FOO': ('BAR', 'BAZ')}) assert hdul[0].header['FOO'] == 'BAR' assert hdul[0].header.comments['FOO'] == 'BAZ' with pytest.raises(ValueError): hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')}) hdul.writeto(self.temp('test.fits')) hdul.close() hdul = fits.open(self.temp('test.fits'), mode='update') hdul[0].header.comments['FOO'] = 'QUX' hdul.close() hdul = fits.open(self.temp('test.fits')) assert hdul[0].header.comments['FOO'] == 'QUX' hdul[0].header.add_comment(0, after='FOO') assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0' hdul.close() def test_commentary_cards(self): # commentary cards val = "A commentary card's value has no quotes around it." c = fits.Card("HISTORY", val) assert str(c) == _pad('HISTORY ' + val) val = "A commentary card has no comment." c = fits.Card("COMMENT", val, "comment") assert str(c) == _pad('COMMENT ' + val) def test_commentary_card_created_by_fromstring(self): # commentary card created by fromstring() c = fits.Card.fromstring( "COMMENT card has no comments. " "/ text after slash is still part of the value.") assert (c.value == 'card has no comments. 
' '/ text after slash is still part of the value.') assert c.comment == '' def test_commentary_card_will_not_parse_numerical_value(self): # commentary card will not parse the numerical value c = fits.Card.fromstring("HISTORY (1, 2)") assert str(c) == _pad("HISTORY (1, 2)") def test_equal_sign_after_column8(self): # equal sign after column 8 of a commentary card will be part ofthe # string value c = fits.Card.fromstring("HISTORY = (1, 2)") assert str(c) == _pad("HISTORY = (1, 2)") def test_blank_keyword(self): c = fits.Card('', ' / EXPOSURE INFORMATION') assert str(c) == _pad(' / EXPOSURE INFORMATION') c = fits.Card.fromstring(str(c)) assert c.keyword == '' assert c.value == ' / EXPOSURE INFORMATION' def test_specify_undefined_value(self): # this is how to specify an undefined value c = fits.Card("undef", fits.card.UNDEFINED) assert str(c) == _pad("UNDEF =") def test_complex_number_using_string_input(self): # complex number using string input c = fits.Card.fromstring('ABC = (8, 9)') assert str(c) == _pad("ABC = (8, 9)") def test_fixable_non_standard_fits_card(self, capsys): # fixable non-standard FITS card will keep the original format c = fits.Card.fromstring('abc = + 2.1 e + 12') assert c.value == 2100000000000.0 assert str(c) == _pad("ABC = +2.1E+12") def test_fixable_non_fsc(self): # fixable non-FSC: if the card is not parsable, it's value will be # assumed # to be a string and everything after the first slash will be comment c = fits.Card.fromstring( "no_quote= this card's value has no quotes " "/ let's also try the comment") assert (str(c) == "NO_QUOTE= 'this card''s value has no quotes' " "/ let's also try the comment ") def test_undefined_value_using_string_input(self): # undefined value using string input c = fits.Card.fromstring('ABC = ') assert str(c) == _pad("ABC =") def test_mislocated_equal_sign(self, capsys): # test mislocated "=" sign c = fits.Card.fromstring('XYZ= 100') assert c.keyword == 'XYZ' assert c.value == 100 assert str(c) == _pad("XYZ = 100") def test_equal_only_up_to_column_10(self, capsys): # the test of "=" location is only up to column 10 # This test used to check if Astropy rewrote this card to a new format, # something like "HISTO = '= (1, 2)". But since ticket #109 if the # format is completely wrong we don't make any assumptions and the card # should be left alone c = fits.Card.fromstring("HISTO = (1, 2)") assert str(c) == _pad("HISTO = (1, 2)") # Likewise this card should just be left in its original form and # we shouldn't guess how to parse it or rewrite it. c = fits.Card.fromstring(" HISTORY (1, 2)") assert str(c) == _pad(" HISTORY (1, 2)") def test_verify_invalid_equal_sign(self): # verification c = fits.Card.fromstring('ABC= a6') with catch_warnings() as w: c.verify() err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at " "column 8)") err_text2 = ("Card 'ABC' is not FITS standard (invalid value " "string: 'a6'") assert len(w) == 4 assert err_text1 in str(w[1].message) assert err_text2 in str(w[2].message) def test_fix_invalid_equal_sign(self): c = fits.Card.fromstring('ABC= a6') with catch_warnings() as w: c.verify('fix') fix_text = "Fixed 'ABC' card to meet the FITS standard." 
assert len(w) == 4 assert fix_text in str(w[1].message) assert str(c) == _pad("ABC = 'a6 '") def test_long_string_value(self): # test long string value c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10) assert (str(c) == "ABC = 'long string value long string value long string value long string &' " "CONTINUE 'value long string value long string value long string value long &' " "CONTINUE 'string value long string value long string value &' " "CONTINUE '&' / long comment long comment long comment long comment long " "CONTINUE '&' / comment long comment long comment long comment long comment " "CONTINUE '' / long comment ") def test_long_unicode_string(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/1 So long as a unicode string can be converted to ASCII it should have no different behavior in this regard from a byte string. """ h1 = fits.Header() h1['TEST'] = 'abcdefg' * 30 h2 = fits.Header() with catch_warnings() as w: h2['TEST'] = 'abcdefg' * 30 assert len(w) == 0 assert str(h1) == str(h2) def test_long_string_repr(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193 Ensure that the __repr__() for cards represented with CONTINUE cards is split across multiple lines (broken at each *physical* card). """ header = fits.Header() header['TEST1'] = ('Regular value', 'Regular comment') header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10) header['TEST3'] = ('Regular value', 'Regular comment') assert (repr(header).splitlines() == [str(fits.Card('TEST1', 'Regular value', 'Regular comment')), "TEST2 = 'long string value long string value long string value long string &' ", "CONTINUE 'value long string value long string value long string value long &' ", "CONTINUE 'string value long string value long string value &' ", "CONTINUE '&' / long comment long comment long comment long comment long ", "CONTINUE '&' / comment long comment long comment long comment long comment ", "CONTINUE '' / long comment ", str(fits.Card('TEST3', 'Regular value', 'Regular comment'))]) def test_blank_keyword_long_value(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194 Test that a blank keyword ('') can be assigned a too-long value that is continued across multiple cards with blank keywords, just like COMMENT and HISTORY cards. 
""" value = 'long string value ' * 10 header = fits.Header() header[''] = value assert len(header) == 3 assert ' '.join(header['']) == value.rstrip() # Ensure that this works like other commentary keywords header['COMMENT'] = value header['HISTORY'] = value assert header['COMMENT'] == header['HISTORY'] assert header['COMMENT'] == header[''] def test_long_string_from_file(self): c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10) hdu = fits.PrimaryHDU() hdu.header.append(c) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) c = hdul[0].header.cards['abc'] hdul.close() assert (str(c) == "ABC = 'long string value long string value long string value long string &' " "CONTINUE 'value long string value long string value long string value long &' " "CONTINUE 'string value long string value long string value &' " "CONTINUE '&' / long comment long comment long comment long comment long " "CONTINUE '&' / comment long comment long comment long comment long comment " "CONTINUE '' / long comment ") def test_word_in_long_string_too_long(self): # if a word in a long string is too long, it will be cut in the middle c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10) assert (str(c) == "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'" "CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'" "CONTINUE 'elongstringvalue&' " "CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme" "CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment ") def test_long_string_value_via_fromstring(self, capsys): # long string value via fromstring() method c = fits.Card.fromstring( _pad("abc = 'longstring''s testing & ' " "/ comments in line 1") + _pad("continue 'continue with long string but without the " "ampersand at the end' /") + _pad("continue 'continue must have string value (with quotes)' " "/ comments with ''. ")) assert (str(c) == "ABC = 'longstring''s testing continue with long string but without the &' " "CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' " "CONTINUE '' / comments in line 1 comments with ''. 
") def test_continue_card_with_equals_in_value(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117 """ c = fits.Card.fromstring( _pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") + _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") + _pad("CONTINUE '&' / pysyn expression")) assert c.keyword == 'EXPR' assert (c.value == '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits ' '* 5.87359e-12 * MWAvg(Av=0.12)') assert c.comment == 'pysyn expression' def test_final_continue_card_lacks_ampersand(self): """ Regression test for https://github.com/astropy/astropy/issues/3282 """ h = fits.Header() h['SVALUE'] = 'A' * 69 assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'") def test_final_continue_card_ampersand_removal_on_long_comments(self): """ Regression test for https://github.com/astropy/astropy/issues/3282 """ c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10) assert (str(c) == "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' " "CONTINUE 'valuelong valuelong valuelong value&' " "CONTINUE '&' / long comment &long comment &long comment &long comment &long " "CONTINUE '&' / comment &long comment &long comment &long comment &long comment " "CONTINUE '' / &long comment & ") def test_hierarch_card_creation(self): # Test automatic upgrade to hierarch card with catch_warnings() as w: c = fits.Card('ESO INS SLIT2 Y1FRML', 'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)') assert len(w) == 1 assert 'HIERARCH card will be created' in str(w[0].message) assert (str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= " "'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'") # Test manual creation of hierarch card c = fits.Card('hierarch abcdefghi', 10) assert str(c) == _pad("HIERARCH abcdefghi = 10") c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML', 'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)') assert (str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= " "'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'") def test_hierarch_with_abbrev_value_indicator(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/5 """ c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'") assert c.keyword == 'key.META_4' assert c.value == 'calFileVersion' assert c.comment == '' def test_hierarch_keyword_whitespace(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/6 Make sure any leading or trailing whitespace around HIERARCH keywords is stripped from the actual keyword value. """ c = fits.Card.fromstring( "HIERARCH key.META_4 = 'calFileVersion'") assert c.keyword == 'key.META_4' assert c.value == 'calFileVersion' assert c.comment == '' # Test also with creation via the Card constructor c = fits.Card('HIERARCH key.META_4', 'calFileVersion') assert c.keyword == 'key.META_4' assert c.value == 'calFileVersion' assert c.comment == '' def test_verify_mixed_case_hierarch(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/7 Assures that HIERARCH keywords with lower-case characters and other normally invalid keyword characters are not considered invalid. 
""" c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment') # This should not raise any exceptions c.verify('exception') assert c.keyword == 'WeirdCard.~!@#_^$%&' assert c.value == 'The value' assert c.comment == 'a comment' # Test also the specific case from the original bug report header = fits.Header([ ('simple', True), ('BITPIX', 8), ('NAXIS', 0), ('EXTEND', True, 'May contain datasets'), ('HIERARCH key.META_0', 'detRow') ]) hdu = fits.PrimaryHDU(header=header) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: header2 = hdul[0].header assert (str(header.cards[header.index('key.META_0')]) == str(header2.cards[header2.index('key.META_0')])) def test_missing_keyword(self): """Test that accessing a non-existent keyword raises a KeyError.""" header = fits.Header() pytest.raises(KeyError, lambda k: header[k], 'NAXIS') # Test the exception message try: header['NAXIS'] except KeyError as e: assert e.args[0] == "Keyword 'NAXIS' not found." def test_hierarch_card_lookup(self): header = fits.Header() header['hierarch abcdefghi'] = 10 assert 'abcdefghi' in header assert header['abcdefghi'] == 10 # This used to be assert_false, but per ticket # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords # should be treated case-insensitively when performing lookups assert 'ABCDEFGHI' in header def test_hierarch_card_delete(self): header = fits.Header() header['hierarch abcdefghi'] = 10 del header['hierarch abcdefghi'] def test_hierarch_card_insert_delete(self): header = fits.Header() header['abcdefghi'] = 10 header['abcdefgh'] = 10 header['abcdefg'] = 10 header.insert(2, ('abcdefghij', 10)) del header['abcdefghij'] header.insert(2, ('abcdefghij', 10)) del header[2] assert list(header.keys())[2] == 'abcdefg'.upper() def test_hierarch_create_and_update(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158 Tests several additional use cases for working with HIERARCH cards. 
""" msg = 'a HIERARCH card will be created' header = fits.Header() with catch_warnings(VerifyWarning) as w: header.update({'HIERARCH BLAH BLAH': 'TESTA'}) assert len(w) == 0 assert 'BLAH BLAH' in header assert header['BLAH BLAH'] == 'TESTA' header.update({'HIERARCH BLAH BLAH': 'TESTB'}) assert len(w) == 0 assert header['BLAH BLAH'], 'TESTB' # Update without explicitly stating 'HIERARCH': header.update({'BLAH BLAH': 'TESTC'}) assert len(w) == 1 assert len(header) == 1 assert header['BLAH BLAH'], 'TESTC' # Test case-insensitivity header.update({'HIERARCH blah blah': 'TESTD'}) assert len(w) == 1 assert len(header) == 1 assert header['blah blah'], 'TESTD' header.update({'blah blah': 'TESTE'}) assert len(w) == 2 assert len(header) == 1 assert header['blah blah'], 'TESTE' # Create a HIERARCH card > 8 characters without explicitly stating # 'HIERARCH' header.update({'BLAH BLAH BLAH': 'TESTA'}) assert len(w) == 3 assert msg in str(w[0].message) header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'}) assert len(w) == 3 assert header['BLAH BLAH BLAH'], 'TESTB' # Update without explicitly stating 'HIERARCH': header.update({'BLAH BLAH BLAH': 'TESTC'}) assert len(w) == 4 assert header['BLAH BLAH BLAH'], 'TESTC' # Test case-insensitivity header.update({'HIERARCH blah blah blah': 'TESTD'}) assert len(w) == 4 assert header['blah blah blah'], 'TESTD' header.update({'blah blah blah': 'TESTE'}) assert len(w) == 5 assert header['blah blah blah'], 'TESTE' def test_short_hierarch_create_and_update(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158 Tests several additional use cases for working with HIERARCH cards, specifically where the keyword is fewer than 8 characters, but contains invalid characters such that it can only be created as a HIERARCH card. 
""" msg = 'a HIERARCH card will be created' header = fits.Header() with catch_warnings(VerifyWarning) as w: header.update({'HIERARCH BLA BLA': 'TESTA'}) assert len(w) == 0 assert 'BLA BLA' in header assert header['BLA BLA'] == 'TESTA' header.update({'HIERARCH BLA BLA': 'TESTB'}) assert len(w) == 0 assert header['BLA BLA'], 'TESTB' # Update without explicitly stating 'HIERARCH': header.update({'BLA BLA': 'TESTC'}) assert len(w) == 1 assert header['BLA BLA'], 'TESTC' # Test case-insensitivity header.update({'HIERARCH bla bla': 'TESTD'}) assert len(w) == 1 assert len(header) == 1 assert header['bla bla'], 'TESTD' header.update({'bla bla': 'TESTE'}) assert len(w) == 2 assert len(header) == 1 assert header['bla bla'], 'TESTE' header = fits.Header() with catch_warnings(VerifyWarning) as w: # Create a HIERARCH card containing invalid characters without # explicitly stating 'HIERARCH' header.update({'BLA BLA': 'TESTA'}) print([x.category for x in w]) assert len(w) == 1 assert msg in str(w[0].message) header.update({'HIERARCH BLA BLA': 'TESTB'}) assert len(w) == 1 assert header['BLA BLA'], 'TESTB' # Update without explicitly stating 'HIERARCH': header.update({'BLA BLA': 'TESTC'}) assert len(w) == 2 assert header['BLA BLA'], 'TESTC' # Test case-insensitivity header.update({'HIERARCH bla bla': 'TESTD'}) assert len(w) == 2 assert len(header) == 1 assert header['bla bla'], 'TESTD' header.update({'bla bla': 'TESTE'}) assert len(w) == 3 assert len(header) == 1 assert header['bla bla'], 'TESTE' def test_header_setitem_invalid(self): header = fits.Header() def test(): header['FOO'] = ('bar', 'baz', 'qux') pytest.raises(ValueError, test) def test_header_setitem_1tuple(self): header = fits.Header() header['FOO'] = ('BAR',) header['FOO2'] = (None,) assert header['FOO'] == 'BAR' assert header['FOO2'] == '' assert header[0] == 'BAR' assert header.comments[0] == '' assert header.comments['FOO'] == '' def test_header_setitem_2tuple(self): header = fits.Header() header['FOO'] = ('BAR', 'BAZ') header['FOO2'] = (None, None) assert header['FOO'] == 'BAR' assert header['FOO2'] == '' assert header[0] == 'BAR' assert header.comments[0] == 'BAZ' assert header.comments['FOO'] == 'BAZ' assert header.comments['FOO2'] == '' def test_header_set_value_to_none(self): """ Setting the value of a card to None should simply give that card a blank value. """ header = fits.Header() header['FOO'] = 'BAR' assert header['FOO'] == 'BAR' header['FOO'] = None assert header['FOO'] == '' def test_set_comment_only(self): header = fits.Header([('A', 'B', 'C')]) header.set('A', comment='D') assert header['A'] == 'B' assert header.comments['A'] == 'D' def test_header_iter(self): header = fits.Header([('A', 'B'), ('C', 'D')]) assert list(header) == ['A', 'C'] def test_header_slice(self): header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')]) newheader = header[1:] assert len(newheader) == 2 assert 'A' not in newheader assert 'C' in newheader assert 'E' in newheader newheader = header[::-1] assert len(newheader) == 3 assert newheader[0] == 'F' assert newheader[1] == 'D' assert newheader[2] == 'B' newheader = header[::2] assert len(newheader) == 2 assert 'A' in newheader assert 'C' not in newheader assert 'E' in newheader def test_header_slice_assignment(self): """ Assigning to a slice should just assign new values to the cards included in the slice. 
""" header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')]) # Test assigning slice to the same value; this works similarly to numpy # arrays header[1:] = 1 assert header[1] == 1 assert header[2] == 1 # Though strings are iterable they should be treated as a scalar value header[1:] = 'GH' assert header[1] == 'GH' assert header[2] == 'GH' # Now assign via an iterable header[1:] = ['H', 'I'] assert header[1] == 'H' assert header[2] == 'I' def test_header_slice_delete(self): """Test deleting a slice of cards from the header.""" header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')]) del header[1:] assert len(header) == 1 assert header[0] == 'B' del header[:] assert len(header) == 0 def test_wildcard_slice(self): """Test selecting a subsection of a header via wildcard matching.""" header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)]) newheader = header['AB*'] assert len(newheader) == 2 assert newheader[0] == 0 assert newheader[1] == 2 def test_wildcard_with_hyphen(self): """ Regression test for issue where wildcards did not work on keywords containing hyphens. """ header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)]) assert len(header['DATE*']) == 3 assert len(header['DATE?*']) == 2 assert len(header['DATE-*']) == 2 def test_wildcard_slice_assignment(self): """Test assigning to a header slice selected via wildcard matching.""" header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)]) # Test assigning slice to the same value; this works similarly to numpy # arrays header['AB*'] = 1 assert header[0] == 1 assert header[2] == 1 # Though strings are iterable they should be treated as a scalar value header['AB*'] = 'GH' assert header[0] == 'GH' assert header[2] == 'GH' # Now assign via an iterable header['AB*'] = ['H', 'I'] assert header[0] == 'H' assert header[2] == 'I' def test_wildcard_slice_deletion(self): """Test deleting cards from a header that match a wildcard pattern.""" header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)]) del header['AB*'] assert len(header) == 1 assert header[0] == 1 def test_header_history(self): header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2), ('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)]) assert header['HISTORY'] == [1, 2, 4, 5] def test_header_clear(self): header = fits.Header([('A', 'B'), ('C', 'D')]) header.clear() assert 'A' not in header assert 'C' not in header assert len(header) == 0 def test_header_fromkeys(self): header = fits.Header.fromkeys(['A', 'B']) assert 'A' in header assert header['A'] == '' assert header.comments['A'] == '' assert 'B' in header assert header['B'] == '' assert header.comments['B'] == '' def test_header_fromkeys_with_value(self): header = fits.Header.fromkeys(['A', 'B'], 'C') assert 'A' in header assert header['A'] == 'C' assert header.comments['A'] == '' assert 'B' in header assert header['B'] == 'C' assert header.comments['B'] == '' def test_header_fromkeys_with_value_and_comment(self): header = fits.Header.fromkeys(['A'], ('B', 'C')) assert 'A' in header assert header['A'] == 'B' assert header.comments['A'] == 'C' def test_header_fromkeys_with_duplicates(self): header = fits.Header.fromkeys(['A', 'B', 'A'], 'C') assert 'A' in header assert ('A', 0) in header assert ('A', 1) in header assert ('A', 2) not in header assert header[0] == 'C' assert header['A'] == 'C' assert header[('A', 0)] == 'C' assert header[2] == 'C' assert header[('A', 1)] == 'C' def test_header_items(self): header = fits.Header([('A', 'B'), ('C', 'D')]) assert list(header.items()) == [('A', 'B'), ('C', 'D')] def 
test_header_iterkeys(self): header = fits.Header([('A', 'B'), ('C', 'D')]) for a, b in zip(header.keys(), header): assert a == b def test_header_itervalues(self): header = fits.Header([('A', 'B'), ('C', 'D')]) for a, b in zip(header.values(), ['B', 'D']): assert a == b def test_header_keys(self): hdul = fits.open(self.data('arange.fits')) assert (list(hdul[0].header) == ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3', 'EXTEND']) def test_header_list_like_pop(self): header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'), ('G', 'H')]) last = header.pop() assert last == 'H' assert len(header) == 3 assert list(header) == ['A', 'C', 'E'] mid = header.pop(1) assert mid == 'D' assert len(header) == 2 assert list(header) == ['A', 'E'] first = header.pop(0) assert first == 'B' assert len(header) == 1 assert list(header) == ['E'] pytest.raises(IndexError, header.pop, 42) def test_header_dict_like_pop(self): header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'), ('G', 'H')]) pytest.raises(TypeError, header.pop, 'A', 'B', 'C') last = header.pop('G') assert last == 'H' assert len(header) == 3 assert list(header) == ['A', 'C', 'E'] mid = header.pop('C') assert mid == 'D' assert len(header) == 2 assert list(header) == ['A', 'E'] first = header.pop('A') assert first == 'B' assert len(header) == 1 assert list(header) == ['E'] default = header.pop('X', 'Y') assert default == 'Y' assert len(header) == 1 pytest.raises(KeyError, header.pop, 'X') def test_popitem(self): header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')]) keyword, value = header.popitem() assert keyword not in header assert len(header) == 2 keyword, value = header.popitem() assert keyword not in header assert len(header) == 1 keyword, value = header.popitem() assert keyword not in header assert len(header) == 0 pytest.raises(KeyError, header.popitem) def test_setdefault(self): header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')]) assert header.setdefault('A') == 'B' assert header.setdefault('C') == 'D' assert header.setdefault('E') == 'F' assert len(header) == 3 assert header.setdefault('G', 'H') == 'H' assert len(header) == 4 assert 'G' in header assert header.setdefault('G', 'H') == 'H' assert len(header) == 4 def test_update_from_dict(self): """ Test adding new cards and updating existing cards from a dict using Header.update() """ header = fits.Header([('A', 'B'), ('C', 'D')]) header.update({'A': 'E', 'F': 'G'}) assert header['A'] == 'E' assert header[0] == 'E' assert 'F' in header assert header['F'] == 'G' assert header[-1] == 'G' # Same as above but this time pass the update dict as keyword arguments header = fits.Header([('A', 'B'), ('C', 'D')]) header.update(A='E', F='G') assert header['A'] == 'E' assert header[0] == 'E' assert 'F' in header assert header['F'] == 'G' assert header[-1] == 'G' def test_update_from_iterable(self): """ Test adding new cards and updating existing cards from an iterable of cards and card tuples. """ header = fits.Header([('A', 'B'), ('C', 'D')]) header.update([('A', 'E'), fits.Card('F', 'G')]) assert header['A'] == 'E' assert header[0] == 'E' assert 'F' in header assert header['F'] == 'G' assert header[-1] == 'G' def test_header_extend(self): """ Test extending a header both with and without stripping cards from the extension header. 
""" hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu2.header['MYKEY'] = ('some val', 'some comment') hdu.header += hdu2.header assert len(hdu.header) == 5 assert hdu.header[-1] == 'some val' # Same thing, but using + instead of += hdu = fits.PrimaryHDU() hdu.header = hdu.header + hdu2.header assert len(hdu.header) == 5 assert hdu.header[-1] == 'some val' # Directly append the other header in full--not usually a desirable # operation when the header is coming from another HDU hdu.header.extend(hdu2.header, strip=False) assert len(hdu.header) == 11 assert list(hdu.header)[5] == 'XTENSION' assert hdu.header[-1] == 'some val' assert ('MYKEY', 1) in hdu.header def test_header_extend_unique(self): """ Test extending the header with and without unique=True. """ hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu.header['MYKEY'] = ('some val', 'some comment') hdu2.header['MYKEY'] = ('some other val', 'some other comment') hdu.header.extend(hdu2.header) assert len(hdu.header) == 6 assert hdu.header[-2] == 'some val' assert hdu.header[-1] == 'some other val' hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu.header['MYKEY'] = ('some val', 'some comment') hdu2.header['MYKEY'] = ('some other val', 'some other comment') hdu.header.extend(hdu2.header, unique=True) assert len(hdu.header) == 5 assert hdu.header[-1] == 'some val' def test_header_extend_unique_commentary(self): """ Test extending header with and without unique=True and commentary cards in the header being added. Issue astropy/astropy#3967 """ for commentary_card in ['', 'COMMENT', 'HISTORY']: for is_unique in [True, False]: hdu = fits.PrimaryHDU() # Make sure we are testing the case we want. assert commentary_card not in hdu.header hdu2 = fits.ImageHDU() hdu2.header[commentary_card] = 'My text' hdu.header.extend(hdu2.header, unique=is_unique) assert len(hdu.header) == 5 assert hdu.header[commentary_card][0] == 'My text' def test_header_extend_update(self): """ Test extending the header with and without update=True. """ hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu.header['MYKEY'] = ('some val', 'some comment') hdu.header['HISTORY'] = 'history 1' hdu2.header['MYKEY'] = ('some other val', 'some other comment') hdu2.header['HISTORY'] = 'history 1' hdu2.header['HISTORY'] = 'history 2' hdu.header.extend(hdu2.header) assert len(hdu.header) == 9 assert ('MYKEY', 0) in hdu.header assert ('MYKEY', 1) in hdu.header assert hdu.header[('MYKEY', 1)] == 'some other val' assert len(hdu.header['HISTORY']) == 3 assert hdu.header[-1] == 'history 2' hdu = fits.PrimaryHDU() hdu.header['MYKEY'] = ('some val', 'some comment') hdu.header['HISTORY'] = 'history 1' hdu.header.extend(hdu2.header, update=True) assert len(hdu.header) == 7 assert ('MYKEY', 0) in hdu.header assert ('MYKEY', 1) not in hdu.header assert hdu.header['MYKEY'] == 'some other val' assert len(hdu.header['HISTORY']) == 2 assert hdu.header[-1] == 'history 2' def test_header_extend_update_commentary(self): """ Test extending header with and without unique=True and commentary cards in the header being added. Though not quite the same as astropy/astropy#3967, update=True hits the same if statement as that issue. """ for commentary_card in ['', 'COMMENT', 'HISTORY']: for is_update in [True, False]: hdu = fits.PrimaryHDU() # Make sure we are testing the case we want. 
assert commentary_card not in hdu.header hdu2 = fits.ImageHDU() hdu2.header[commentary_card] = 'My text' hdu.header.extend(hdu2.header, update=is_update) assert len(hdu.header) == 5 assert hdu.header[commentary_card][0] == 'My text' def test_header_extend_exact(self): """ Test that extending an empty header with the contents of an existing header can exactly duplicate that header, given strip=False and end=True. """ header = fits.getheader(self.data('test0.fits')) header2 = fits.Header() header2.extend(header, strip=False, end=True) assert header == header2 def test_header_count(self): header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')]) assert header.count('A') == 1 assert header.count('C') == 1 assert header.count('E') == 1 header['HISTORY'] = 'a' header['HISTORY'] = 'b' assert header.count('HISTORY') == 2 pytest.raises(KeyError, header.count, 'G') def test_header_append_use_blanks(self): """ Tests that blank cards can be appended, and that future appends will use blank cards when available (unless useblanks=False) """ header = fits.Header([('A', 'B'), ('C', 'D')]) # Append a couple blanks header.append() header.append() assert len(header) == 4 assert header[-1] == '' assert header[-2] == '' # New card should fill the first blank by default header.append(('E', 'F')) assert len(header) == 4 assert header[-2] == 'F' assert header[-1] == '' # This card should not use up a blank spot header.append(('G', 'H'), useblanks=False) assert len(header) == 5 assert header[-1] == '' assert header[-2] == 'H' def test_header_append_keyword_only(self): """ Test appending a new card with just the keyword, and no value or comment given. """ header = fits.Header([('A', 'B'), ('C', 'D')]) header.append('E') assert len(header) == 3 assert list(header)[-1] == 'E' assert header[-1] == '' assert header.comments['E'] == '' # Try appending a blank--normally this can be accomplished with just # header.append(), but header.append('') should also work (and is maybe # a little more clear) header.append('') assert len(header) == 4 assert list(header)[-1] == '' assert header[''] == '' assert header.comments[''] == '' def test_header_insert_use_blanks(self): header = fits.Header([('A', 'B'), ('C', 'D')]) # Append a couple blanks header.append() header.append() # Insert a new card; should use up one of the blanks header.insert(1, ('E', 'F')) assert len(header) == 4 assert header[1] == 'F' assert header[-1] == '' assert header[-2] == 'D' # Insert a new card without using blanks header.insert(1, ('G', 'H'), useblanks=False) assert len(header) == 5 assert header[1] == 'H' assert header[-1] == '' def test_header_insert_before_keyword(self): """ Test that a keyword name or tuple can be used to insert new keywords. Also tests the ``after`` keyword argument. 
Regression test for https://github.com/spacetelescope/PyFITS/issues/12 """ header = fits.Header([ ('NAXIS1', 10), ('COMMENT', 'Comment 1'), ('COMMENT', 'Comment 3')]) header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes')) assert list(header.keys())[0] == 'NAXIS' assert header[0] == 2 assert header.comments[0] == 'Number of axes' header.insert('NAXIS1', ('NAXIS2', 20), after=True) assert list(header.keys())[1] == 'NAXIS1' assert list(header.keys())[2] == 'NAXIS2' assert header[2] == 20 header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2')) assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3'] header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True) assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3', 'Comment 4'] header.insert(-1, ('TEST1', True)) assert list(header.keys())[-2] == 'TEST1' header.insert(-1, ('TEST2', True), after=True) assert list(header.keys())[-1] == 'TEST2' assert list(header.keys())[-3] == 'TEST1' def test_remove(self): header = fits.Header([('A', 'B'), ('C', 'D')]) # When keyword is present in the header it should be removed. header.remove('C') assert len(header) == 1 assert list(header) == ['A'] assert 'C' not in header # When keyword is not present in the header and ignore_missing is # False, KeyError should be raised with pytest.raises(KeyError): header.remove('F') # When keyword is not present and ignore_missing is True, KeyError # will be ignored header.remove('F', ignore_missing=True) assert len(header) == 1 # Test for removing all instances of a keyword header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')]) header.remove('A', remove_all=True) assert 'A' not in header assert len(header) == 1 assert list(header) == ['C'] assert header[0] == 'D' def test_header_comments(self): header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')]) assert (repr(header.comments) == ' A C\n' ' DEF H') def test_comment_slices_and_filters(self): header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'), ('AI', 'J', 'K')]) s = header.comments[1:] assert list(s) == ['H', 'K'] s = header.comments[::-1] assert list(s) == ['K', 'H', 'D'] s = header.comments['A*'] assert list(s) == ['D', 'K'] def test_comment_slice_filter_assign(self): header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'), ('AI', 'J', 'K')]) header.comments[1:] = 'L' assert list(header.comments) == ['D', 'L', 'L'] assert header.cards[header.index('AB')].comment == 'D' assert header.cards[header.index('EF')].comment == 'L' assert header.cards[header.index('AI')].comment == 'L' header.comments[::-1] = header.comments[:] assert list(header.comments) == ['L', 'L', 'D'] header.comments['A*'] = ['M', 'N'] assert list(header.comments) == ['M', 'L', 'N'] def test_commentary_slicing(self): header = fits.Header() indices = list(range(5)) for idx in indices: header['HISTORY'] = idx # Just a few sample slice types; this won't get all corner cases but if # these all work we should be in good shape assert header['HISTORY'][1:] == indices[1:] assert header['HISTORY'][:3] == indices[:3] assert header['HISTORY'][:6] == indices[:6] assert header['HISTORY'][:-2] == indices[:-2] assert header['HISTORY'][::-1] == indices[::-1] assert header['HISTORY'][1::-1] == indices[1::-1] assert header['HISTORY'][1:5:2] == indices[1:5:2] # Same tests, but copy the values first; as it turns out this is # different from just directly doing an __eq__ as in the first set of # assertions header.insert(0, ('A', 'B', 'C')) header.append(('D', 'E', 'F'), end=True) assert list(header['HISTORY'][1:]) == indices[1:] 
assert list(header['HISTORY'][:3]) == indices[:3] assert list(header['HISTORY'][:6]) == indices[:6] assert list(header['HISTORY'][:-2]) == indices[:-2] assert list(header['HISTORY'][::-1]) == indices[::-1] assert list(header['HISTORY'][1::-1]) == indices[1::-1] assert list(header['HISTORY'][1:5:2]) == indices[1:5:2] def test_update_commentary(self): header = fits.Header() header['FOO'] = 'BAR' header['HISTORY'] = 'ABC' header['FRED'] = 'BARNEY' header['HISTORY'] = 'DEF' header['HISTORY'] = 'GHI' assert header['HISTORY'] == ['ABC', 'DEF', 'GHI'] # Single value update header['HISTORY'][0] = 'FOO' assert header['HISTORY'] == ['FOO', 'DEF', 'GHI'] # Single value partial slice update header['HISTORY'][1:] = 'BAR' assert header['HISTORY'] == ['FOO', 'BAR', 'BAR'] # Multi-value update header['HISTORY'][:] = ['BAZ', 'QUX'] assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR'] def test_commentary_comparison(self): """ Regression test for an issue found in *writing* the regression test for https://github.com/astropy/astropy/issues/2363, where comparison of the list of values for a commentary keyword did not always compare correctly with other iterables. """ header = fits.Header() header['HISTORY'] = 'hello world' header['HISTORY'] = 'hello world' header['COMMENT'] = 'hello world' assert header['HISTORY'] != header['COMMENT'] header['COMMENT'] = 'hello world' assert header['HISTORY'] == header['COMMENT'] def test_long_commentary_card(self): header = fits.Header() header['FOO'] = 'BAR' header['BAZ'] = 'QUX' longval = 'ABC' * 30 header['HISTORY'] = longval header['FRED'] = 'BARNEY' header['HISTORY'] = longval assert len(header) == 7 assert list(header)[2] == 'FRED' assert str(header.cards[3]) == 'HISTORY ' + longval[:72] assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:] header.set('HISTORY', longval, after='FOO') assert len(header) == 9 assert str(header.cards[1]) == 'HISTORY ' + longval[:72] assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:] header = fits.Header() header.update({'FOO': 'BAR'}) header.update({'BAZ': 'QUX'}) longval = 'ABC' * 30 header.add_history(longval) header.update({'FRED': 'BARNEY'}) header.add_history(longval) assert len(header.cards) == 7 assert header.cards[2].keyword == 'FRED' assert str(header.cards[3]) == 'HISTORY ' + longval[:72] assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:] header.add_history(longval, after='FOO') assert len(header.cards) == 9 assert str(header.cards[1]) == 'HISTORY ' + longval[:72] assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:] def test_totxtfile(self): hdul = fits.open(self.data('test0.fits')) hdul[0].header.totextfile(self.temp('header.txt')) hdu = fits.ImageHDU() hdu.header.update({'MYKEY': 'FOO'}) hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')), update=True, update_first=True) # Write the hdu out and read it back in again--it should be recognized # as a PrimaryHDU hdu.writeto(self.temp('test.fits'), output_verify='ignore') assert isinstance(fits.open(self.temp('test.fits'))[0], fits.PrimaryHDU) hdu = fits.ImageHDU() hdu.header.update({'MYKEY': 'FOO'}) hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')), update=True, update_first=True, strip=False) assert 'MYKEY' in hdu.header assert 'EXTENSION' not in hdu.header assert 'SIMPLE' in hdu.header with ignore_warnings(): hdu.writeto(self.temp('test.fits'), output_verify='ignore', overwrite=True) hdul2 = fits.open(self.temp('test.fits')) assert len(hdul2) == 2 assert 'MYKEY' in hdul2[1].header def 
test_header_fromtextfile(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122 Manually write a text file containing some header cards ending with newlines and ensure that fromtextfile can read them back in. """ header = fits.Header() header['A'] = ('B', 'C') header['B'] = ('C', 'D') header['C'] = ('D', 'E') with open(self.temp('test.hdr'), 'w') as f: f.write('\n'.join(str(c).strip() for c in header.cards)) header2 = fits.Header.fromtextfile(self.temp('test.hdr')) assert header == header2 def test_header_fromtextfile_with_end_card(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154 Make sure that when a Header is read from a text file that the END card is ignored. """ header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')]) # We don't use header.totextfile here because it writes each card with # trailing spaces to pad them out to 80 characters. But this bug only # presents itself when each card ends immediately with a newline, and # no trailing spaces with open(self.temp('test.hdr'), 'w') as f: f.write('\n'.join(str(c).strip() for c in header.cards)) f.write('\nEND') new_header = fits.Header.fromtextfile(self.temp('test.hdr')) assert 'END' not in new_header assert header == new_header def test_append_end_card(self): """ Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154 Manually adding an END card to a header should simply result in a ValueError (as was the case in PyFITS 3.0 and earlier). """ header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')]) def setitem(k, v): header[k] = v pytest.raises(ValueError, setitem, 'END', '') pytest.raises(ValueError, header.append, 'END') pytest.raises(ValueError, header.append, 'END', end=True) pytest.raises(ValueError, header.insert, len(header), 'END') pytest.raises(ValueError, header.set, 'END') def test_invalid_end_cards(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217 This tests the case where the END card looks like a normal card like 'END = ' and other similar oddities. As long as a card starts with END and looks like it was intended to be the END card we allow it, but with a warning. 
""" horig = fits.PrimaryHDU(data=np.arange(100)).header def invalid_header(end, pad): # Build up a goofy invalid header # Start from a seemingly normal header s = horig.tostring(sep='', endcard=False, padding=False) # append the bogus end card s += end # add additional padding if requested if pad: s += ' ' * _pad_length(len(s)) # This will differ between Python versions if isinstance(s, bytes): return BytesIO(s) else: return StringIO(s) # Basic case motivated by the original issue; it's as if the END card # was appened by software that doesn't know to treat it specially, and # it is given an = after it s = invalid_header('END =', True) with catch_warnings() as w: h = fits.Header.fromfile(s) assert h == horig assert len(w) == 1 assert str(w[0].message).startswith( "Unexpected bytes trailing END keyword: ' ='") # A case similar to the last but with more spaces between END and the # =, as though the '= ' value indicator were placed like that of a # normal card s = invalid_header('END = ', True) with catch_warnings() as w: h = fits.Header.fromfile(s) assert h == horig assert len(w) == 1 assert str(w[0].message).startswith( "Unexpected bytes trailing END keyword: ' ='") # END card with trailing gibberish s = invalid_header('END$%&%^*%*', True) with catch_warnings() as w: h = fits.Header.fromfile(s) assert h == horig assert len(w) == 1 assert str(w[0].message).startswith( "Unexpected bytes trailing END keyword: '$%&%^*%*'") # 'END' at the very end of a truncated file without padding; the way # the block reader works currently this can only happen if the 'END' # is at the very end of the file. s = invalid_header('END', False) with catch_warnings() as w: # Don't raise an exception on missing padding, but still produce a # warning that the END card is incomplete h = fits.Header.fromfile(s, padding=False) assert h == horig assert len(w) == 1 assert str(w[0].message).startswith( "Missing padding to end of the FITS block") def test_invalid_characters(self): """ Test header with invalid characters """ # Generate invalid file with non-ASCII character h = fits.Header() h['FOO'] = 'BAR' h['COMMENT'] = 'hello' hdul = fits.PrimaryHDU(header=h, data=np.arange(5)) hdul.writeto(self.temp('test.fits')) with open(self.temp('test.fits'), 'rb') as f: out = f.read() out = out.replace(b'hello', u'héllo'.encode('latin1')) out = out.replace(b'BAR', u'BÀR'.encode('latin1')) with open(self.temp('test2.fits'), 'wb') as f2: f2.write(out) with catch_warnings() as w: h = fits.getheader(self.temp('test2.fits')) assert h['FOO'] == 'B?R' assert h['COMMENT'] == 'h?llo' assert len(w) == 1 assert str(w[0].message).startswith( "non-ASCII characters are present in the FITS file") def test_unnecessary_move(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125 Ensures that a header is not modified when setting the position of a keyword that's already in its correct position. 
""" header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')]) header.set('B', before=2) assert list(header) == ['A', 'B', 'C'] assert not header._modified header.set('B', after=0) assert list(header) == ['A', 'B', 'C'] assert not header._modified header.set('B', before='C') assert list(header) == ['A', 'B', 'C'] assert not header._modified header.set('B', after='A') assert list(header) == ['A', 'B', 'C'] assert not header._modified header.set('B', before=2) assert list(header) == ['A', 'B', 'C'] assert not header._modified # 123 is well past the end, and C is already at the end, so it's in the # right place already header.set('C', before=123) assert list(header) == ['A', 'B', 'C'] assert not header._modified header.set('C', after=123) assert list(header) == ['A', 'B', 'C'] assert not header._modified def test_invalid_float_cards(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137""" # Create a header containing two of the problematic cards in the test # case where this came up: hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000" h = fits.Header.fromstring(hstr, sep='\n') # First the case that *does* work prior to fixing this issue assert h['FOCALLEN'] == 155.0 assert h['APERTURE'] == 0.0 # Now if this were reserialized, would new values for these cards be # written with repaired exponent signs? assert (str(h.cards['FOCALLEN']) == _pad("FOCALLEN= +1.550000000000E+002")) assert h.cards['FOCALLEN']._modified assert (str(h.cards['APERTURE']) == _pad("APERTURE= +0.000000000000E+000")) assert h.cards['APERTURE']._modified assert h._modified # This is the case that was specifically causing problems; generating # the card strings *before* parsing the values. Also, the card strings # really should be "fixed" before being returned to the user h = fits.Header.fromstring(hstr, sep='\n') assert (str(h.cards['FOCALLEN']) == _pad("FOCALLEN= +1.550000000000E+002")) assert h.cards['FOCALLEN']._modified assert (str(h.cards['APERTURE']) == _pad("APERTURE= +0.000000000000E+000")) assert h.cards['APERTURE']._modified assert h['FOCALLEN'] == 155.0 assert h['APERTURE'] == 0.0 assert h._modified # For the heck of it, try assigning the identical values and ensure # that the newly fixed value strings are left intact h['FOCALLEN'] = 155.0 h['APERTURE'] = 0.0 assert (str(h.cards['FOCALLEN']) == _pad("FOCALLEN= +1.550000000000E+002")) assert (str(h.cards['APERTURE']) == _pad("APERTURE= +0.000000000000E+000")) def test_invalid_float_cards2(self, capsys): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140 """ # The example for this test requires creating a FITS file containing a # slightly misformatted float value. I can't actually even find a way # to do that directly through Astropy--it won't let me. hdu = fits.PrimaryHDU() hdu.header['TEST'] = 5.0022221e-07 hdu.writeto(self.temp('test.fits')) # Here we manually make the file invalid with open(self.temp('test.fits'), 'rb+') as f: f.seek(346) # Location of the exponent 'E' symbol f.write(encode_ascii('e')) hdul = fits.open(self.temp('test.fits')) with catch_warnings() as w: hdul.writeto(self.temp('temp.fits'), output_verify='warn') assert len(w) == 5 # The first two warnings are just the headers to the actual warning # message (HDU 0, Card 4). I'm still not sure things like that # should be output as separate warning messages, but that's # something to think about... 
        msg = str(w[3].message)
        assert "(invalid value string: '5.0022221e-07')" in msg

    def test_leading_zeros(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2

        Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
        float values like 0.001 the leading zero was unnecessarily being
        stripped off when rewriting the header.  Though leading zeros should be
        removed from integer values to prevent misinterpretation as octal by
        Python (for now Astropy will still maintain the leading zeros if no
        changes are made to the value, but will drop them if changes are made).
        """

        c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
        assert str(c) == _pad("APERTURE= +0.000000000000E+000")
        assert c.value == 0.0
        c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
        assert str(c) == _pad("APERTURE= 0.000000000000E+000")
        assert c.value == 0.0
        c = fits.Card.fromstring("APERTURE= 017")
        assert str(c) == _pad("APERTURE= 017")
        assert c.value == 17

    def test_assign_boolean(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123

        Tests assigning Python and Numpy boolean values to keyword values.
        """

        fooimg = _pad('FOO = T')
        barimg = _pad('BAR = F')
        h = fits.Header()
        h['FOO'] = True
        h['BAR'] = False
        assert h['FOO'] is True
        assert h['BAR'] is False
        assert str(h.cards['FOO']) == fooimg
        assert str(h.cards['BAR']) == barimg

        h = fits.Header()
        h['FOO'] = np.bool_(True)
        h['BAR'] = np.bool_(False)
        assert h['FOO'] is True
        assert h['BAR'] is False
        assert str(h.cards['FOO']) == fooimg
        assert str(h.cards['BAR']) == barimg

        h = fits.Header()
        h.append(fits.Card.fromstring(fooimg))
        h.append(fits.Card.fromstring(barimg))
        assert h['FOO'] is True
        assert h['BAR'] is False
        assert str(h.cards['FOO']) == fooimg
        assert str(h.cards['BAR']) == barimg

    def test_header_method_keyword_normalization(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149

        Basically ensures that all public Header methods are case-insensitive
        w.r.t. keywords.

        Provides a reasonably comprehensive test of several methods at once.
        """

        h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
        assert list(h) == ['ABC', 'DEF', 'GEH']
        assert 'abc' in h
        assert 'dEf' in h

        assert h['geh'] == 3

        # Case insensitivity of wildcards
        assert len(h['g*']) == 1

        h['aBc'] = 2
        assert h['abc'] == 2
        # ABC already existed so assigning to aBc should not have added any new
        # cards
        assert len(h) == 3

        del h['gEh']
        assert list(h) == ['ABC', 'DEF']
        assert len(h) == 2
        assert h.get('def') == 2

        h.set('Abc', 3)
        assert h['ABC'] == 3
        h.set('gEh', 3, before='Abc')
        assert list(h) == ['GEH', 'ABC', 'DEF']

        assert h.pop('abC') == 3
        assert len(h) == 2

        assert h.setdefault('def', 3) == 2
        assert len(h) == 2
        assert h.setdefault('aBc', 1) == 1
        assert len(h) == 3
        assert list(h) == ['GEH', 'DEF', 'ABC']

        h.update({'GeH': 1, 'iJk': 4})
        assert len(h) == 4
        assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
        assert h['GEH'] == 1

        assert h.count('ijk') == 1
        assert h.index('ijk') == 3

        h.remove('Def')
        assert len(h) == 3
        assert list(h) == ['GEH', 'ABC', 'IJK']

    def test_end_in_comment(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142

        Tests a case where the comment of a card ends with END, and is
        followed by several blank cards.
""" data = np.arange(100).reshape(10, 10) hdu = fits.PrimaryHDU(data=data) hdu.header['TESTKW'] = ('Test val', 'This is the END') # Add a couple blanks after the END string hdu.header.append() hdu.header.append() hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), memmap=False) as hdul: # memmap = False to avoid leaving open a mmap to the file when we # access the data--this causes problems on Windows when we try to # overwrite the file later assert 'TESTKW' in hdul[0].header assert hdul[0].header == hdu.header assert (hdul[0].data == data).all() # Add blanks until the header is extended to two block sizes while len(hdu.header) < 36: hdu.header.append() with ignore_warnings(): hdu.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as hdul: assert 'TESTKW' in hdul[0].header assert hdul[0].header == hdu.header assert (hdul[0].data == data).all() # Test parsing the same header when it's written to a text file hdu.header.totextfile(self.temp('test.hdr')) header2 = fits.Header.fromtextfile(self.temp('test.hdr')) assert hdu.header == header2 def test_assign_unicode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134 Assigning a unicode literal as a header value should not fail silently. If the value can be converted to ASCII then it should just work. Otherwise it should fail with an appropriate value error. Also tests unicode for keywords and comments. """ erikku = '\u30a8\u30ea\u30c3\u30af' def assign(keyword, val): h[keyword] = val h = fits.Header() h['FOO'] = 'BAR' assert 'FOO' in h assert h['FOO'] == 'BAR' assert repr(h) == _pad("FOO = 'BAR '") pytest.raises(ValueError, assign, erikku, 'BAR') h['FOO'] = 'BAZ' assert h['FOO'] == 'BAZ' assert repr(h) == _pad("FOO = 'BAZ '") pytest.raises(ValueError, assign, 'FOO', erikku) h['FOO'] = ('BAR', 'BAZ') assert h['FOO'] == 'BAR' assert h.comments['FOO'] == 'BAZ' assert repr(h) == _pad("FOO = 'BAR ' / BAZ") pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku)) pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ')) pytest.raises(ValueError, assign, 'FOO', (erikku, erikku)) def test_assign_non_ascii(self): """ First regression test for https://github.com/spacetelescope/PyFITS/issues/37 Although test_assign_unicode ensures that `str` objects containing non-ASCII characters cannot be assigned to headers. It should not be possible to assign bytes to a header at all. """ h = fits.Header() pytest.raises(ValueError, h.set, 'TEST', bytes('Hello', encoding='ascii')) def test_header_strip_whitespace(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and for the solution that is optional stripping of whitespace from the end of a header value. By default extra whitespace is stripped off, but if `fits.conf.strip_header_whitespace` = False it should not be stripped. 
""" h = fits.Header() h['FOO'] = 'Bar ' assert h['FOO'] == 'Bar' c = fits.Card.fromstring("QUX = 'Bar '") h.append(c) assert h['QUX'] == 'Bar' assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '" assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '" with fits.conf.set_temp('strip_header_whitespace', False): assert h['FOO'] == 'Bar ' assert h['QUX'] == 'Bar ' assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '" assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '" assert h['FOO'] == 'Bar' assert h['QUX'] == 'Bar' assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '" assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '" def test_keep_duplicate_history_in_orig_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156 When creating a new HDU from an existing Header read from an existing FITS file, if the origianl header contains duplicate HISTORY values those duplicates should be preserved just as in the original header. This bug occurred due to naivete in Header.extend. """ history = ['CCD parameters table ...', ' reference table oref$n951041ko_ccd.fits', ' INFLIGHT 12/07/2001 25/02/2002', ' all bias frames'] * 3 hdu = fits.PrimaryHDU() # Add the history entries twice for item in history: hdu.header['HISTORY'] = item hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[0].header['HISTORY'] == history new_hdu = fits.PrimaryHDU(header=hdu.header) assert new_hdu.header['HISTORY'] == hdu.header['HISTORY'] new_hdu.writeto(self.temp('test2.fits')) with fits.open(self.temp('test2.fits')) as hdul: assert hdul[0].header['HISTORY'] == history def test_invalid_keyword_cards(self): """ Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109 Allow opening files with headers containing invalid keywords. """ # Create a header containing a few different types of BAD headers. c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30') c2 = fits.Card.fromstring('Just some random text.') c3 = fits.Card.fromstring('A' * 80) hdu = fits.PrimaryHDU() # This should work with some warnings with catch_warnings() as w: hdu.header.append(c1) hdu.header.append(c2) hdu.header.append(c3) assert len(w) == 3 hdu.writeto(self.temp('test.fits')) with catch_warnings() as w: with fits.open(self.temp('test.fits')) as hdul: # Merely opening the file should blast some warnings about the # invalid keywords assert len(w) == 3 header = hdul[0].header assert 'CLFIND2D' in header assert 'Just som' in header assert 'AAAAAAAA' in header assert header['CLFIND2D'] == ': contour = 0.30' assert header['Just som'] == 'e random text.' assert header['AAAAAAAA'] == 'A' * 72 # It should not be possible to assign to the invalid keywords pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo') pytest.raises(ValueError, header.set, 'Just som', 'foo') pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo') def test_fix_hierarch_with_invalid_value(self, capsys): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172 Ensures that when fixing a hierarch card it remains a hierarch card. """ c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6') c.verify('fix') assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6') def test_assign_inf_nan(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/11 For the time being it should not be possible to assign the floating point values inf or nan to a header value, since this is not defined by the FITS standard. 
""" h = fits.Header() pytest.raises(ValueError, h.set, 'TEST', float('nan')) pytest.raises(ValueError, h.set, 'TEST', np.nan) pytest.raises(ValueError, h.set, 'TEST', float('inf')) pytest.raises(ValueError, h.set, 'TEST', np.inf) def test_update_bool(self): """ Regression test for an issue where a value of True in a header cannot be updated to a value of 1, and likewise for False/0. """ h = fits.Header([('TEST', True)]) h['TEST'] = 1 assert h['TEST'] is not True assert isinstance(h['TEST'], int) assert h['TEST'] == 1 h['TEST'] = np.bool_(True) assert h['TEST'] is True h['TEST'] = False assert h['TEST'] is False h['TEST'] = np.bool_(False) assert h['TEST'] is False h['TEST'] = 0 assert h['TEST'] is not False assert isinstance(h['TEST'], int) assert h['TEST'] == 0 h['TEST'] = np.bool_(False) assert h['TEST'] is False def test_update_numeric(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/49 Ensure that numeric values can be upcast/downcast between int, float, and complex by assigning values that compare equal to the existing value but are a different type. """ h = fits.Header() h['TEST'] = 1 # int -> float h['TEST'] = 1.0 assert isinstance(h['TEST'], float) assert str(h).startswith('TEST = 1.0') # float -> int h['TEST'] = 1 assert isinstance(h['TEST'], int) assert str(h).startswith('TEST = 1') # int -> complex h['TEST'] = 1.0+0.0j assert isinstance(h['TEST'], complex) assert str(h).startswith('TEST = (1.0, 0.0)') # complex -> float h['TEST'] = 1.0 assert isinstance(h['TEST'], float) assert str(h).startswith('TEST = 1.0') # float -> complex h['TEST'] = 1.0+0.0j assert isinstance(h['TEST'], complex) assert str(h).startswith('TEST = (1.0, 0.0)') # complex -> int h['TEST'] = 1 assert isinstance(h['TEST'], int) assert str(h).startswith('TEST = 1') # Now the same tests but with zeros h['TEST'] = 0 # int -> float h['TEST'] = 0.0 assert isinstance(h['TEST'], float) assert str(h).startswith('TEST = 0.0') # float -> int h['TEST'] = 0 assert isinstance(h['TEST'], int) assert str(h).startswith('TEST = 0') # int -> complex h['TEST'] = 0.0+0.0j assert isinstance(h['TEST'], complex) assert str(h).startswith('TEST = (0.0, 0.0)') # complex -> float h['TEST'] = 0.0 assert isinstance(h['TEST'], float) assert str(h).startswith('TEST = 0.0') # float -> complex h['TEST'] = 0.0+0.0j assert isinstance(h['TEST'], complex) assert str(h).startswith('TEST = (0.0, 0.0)') # complex -> int h['TEST'] = 0 assert isinstance(h['TEST'], int) assert str(h).startswith('TEST = 0') def test_newlines_in_commentary(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/51 Test data extracted from a header in an actual FITS file found in the wild. Names have been changed to protect the innocent. 
""" # First ensure that we can't assign new keyword values with newlines in # them h = fits.Header() pytest.raises(ValueError, h.set, 'HISTORY', '\n') pytest.raises(ValueError, h.set, 'HISTORY', '\nabc') pytest.raises(ValueError, h.set, 'HISTORY', 'abc\n') pytest.raises(ValueError, h.set, 'HISTORY', 'abc\ndef') test_cards = [ "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 " "HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 " "HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 " "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif" "HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use" "HISTORY r ' fred' with fv on 2013-11-04T16:59:14 " "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif" "HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use" "HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' " "HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv " "HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1" "HISTORY 1-04T16:59:14 " ] for card_image in test_cards: c = fits.Card.fromstring(card_image) if '\n' in card_image: pytest.raises(fits.VerifyError, c.verify, 'exception') else: c.verify('exception') class TestRecordValuedKeywordCards(FitsTestCase): """ Tests for handling of record-valued keyword cards as used by the `FITS WCS distortion paper <http://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__. These tests are derived primarily from the release notes for PyFITS 1.4 (in which this feature was first introduced. """ def setup(self): super().setup() self._test_header = fits.Header() self._test_header.set('DP1', 'NAXIS: 2') self._test_header.set('DP1', 'AXIS.1: 1') self._test_header.set('DP1', 'AXIS.2: 2') self._test_header.set('DP1', 'NAUX: 2') self._test_header.set('DP1', 'AUX.1.COEFF.0: 0') self._test_header.set('DP1', 'AUX.1.POWER.0: 1') self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125') self._test_header.set('DP1', 'AUX.1.POWER.1: 1') def test_initialize_rvkc(self): """ Test different methods for initializing a card that should be recognized as a RVKC """ c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment") assert c.keyword == 'DP1.NAXIS' assert c.value == 2.0 assert c.field_specifier == 'NAXIS' assert c.comment == 'A comment' c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'") assert c.keyword == 'DP1.NAXIS' assert c.value == 2.1 assert c.field_specifier == 'NAXIS' c = fits.Card.fromstring("DP1 = 'NAXIS: a'") assert c.keyword == 'DP1' assert c.value == 'NAXIS: a' assert c.field_specifier is None c = fits.Card('DP1', 'NAXIS: 2') assert c.keyword == 'DP1.NAXIS' assert c.value == 2.0 assert c.field_specifier == 'NAXIS' c = fits.Card('DP1', 'NAXIS: 2.0') assert c.keyword == 'DP1.NAXIS' assert c.value == 2.0 assert c.field_specifier == 'NAXIS' c = fits.Card('DP1', 'NAXIS: a') assert c.keyword == 'DP1' assert c.value == 'NAXIS: a' assert c.field_specifier is None c = fits.Card('DP1.NAXIS', 2) assert c.keyword == 'DP1.NAXIS' assert c.value == 2.0 assert c.field_specifier == 'NAXIS' c = fits.Card('DP1.NAXIS', 2.0) assert c.keyword == 'DP1.NAXIS' assert c.value == 2.0 assert c.field_specifier == 'NAXIS' with ignore_warnings(): c = fits.Card('DP1.NAXIS', 'a') assert c.keyword == 'DP1.NAXIS' assert c.value == 'a' assert c.field_specifier is None def test_parse_field_specifier(self): """ Tests that the field_specifier 
        can be accessed from a card read from a string before any other
        attributes are accessed.
        """

        c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
        assert c.field_specifier == 'NAXIS'
        assert c.keyword == 'DP1.NAXIS'
        assert c.value == 2.0
        assert c.comment == 'A comment'

    def test_update_field_specifier(self):
        """
        Test setting the field_specifier attribute and updating the card image
        to reflect the new value.
        """

        c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
        assert c.field_specifier == 'NAXIS'
        c.field_specifier = 'NAXIS1'
        assert c.field_specifier == 'NAXIS1'
        assert c.keyword == 'DP1.NAXIS1'
        assert c.value == 2.0
        assert c.comment == 'A comment'
        assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"

    def test_field_specifier_case_sensitivity(self):
        """
        The keyword portion of an RVKC should still be case-insensitive, but
        the field-specifier portion should be case-sensitive.
        """

        header = fits.Header()
        header.set('abc.def', 1)
        header.set('abc.DEF', 2)
        assert header['abc.def'] == 1
        assert header['ABC.def'] == 1
        assert header['aBc.def'] == 1
        assert header['ABC.DEF'] == 2
        assert 'ABC.dEf' not in header

    def test_get_rvkc_by_index(self):
        """
        Returning a RVKC from a header via index lookup should return the
        float value of the card.
        """

        assert self._test_header[0] == 2.0
        assert isinstance(self._test_header[0], float)
        assert self._test_header[1] == 1.0
        assert isinstance(self._test_header[1], float)

    def test_get_rvkc_by_keyword(self):
        """
        Returning a RVKC just via the keyword name should return the full value
        string of the first card with that keyword.

        This test was changed to reflect the requirement in ticket
        https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it
        required _test_header['DP1'] to return the parsed float value.
        """

        assert self._test_header['DP1'] == 'NAXIS: 2'

    def test_get_rvkc_by_keyword_and_field_specifier(self):
        """
        Returning a RVKC via the full keyword/field-specifier combination
        should return the floating point value associated with the RVKC.
        """

        assert self._test_header['DP1.NAXIS'] == 2.0
        assert isinstance(self._test_header['DP1.NAXIS'], float)
        assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125

    def test_access_nonexistent_rvkc(self):
        """
        Accessing a nonexistent RVKC should raise an IndexError for
        index-based lookup, or a KeyError for keyword lookup (like a normal
        card).
        """

        pytest.raises(IndexError, lambda x: self._test_header[x], 8)
        pytest.raises(KeyError, lambda k: self._test_header[k], 'DP1.AXIS.3')
        # Test the exception message
        try:
            self._test_header['DP1.AXIS.3']
        except KeyError as e:
            assert e.args[0] == "Keyword 'DP1.AXIS.3' not found."
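
    def test_rvkc_parse_sketch(self):
        """
        Illustrative sketch: parsing a record-valued keyword card from its
        string image.  The expected values simply mirror the assertions made
        in ``test_initialize_rvkc`` above.
        """

        c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
        assert c.keyword == 'DP1.NAXIS'
        assert c.value == 2.0
        assert c.field_specifier == 'NAXIS'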
def test_update_rvkc(self): """A RVKC can be updated either via index or keyword access.""" self._test_header[0] = 3 assert self._test_header['DP1.NAXIS'] == 3.0 assert isinstance(self._test_header['DP1.NAXIS'], float) self._test_header['DP1.AXIS.1'] = 1.1 assert self._test_header['DP1.AXIS.1'] == 1.1 def test_update_rvkc_2(self): """Regression test for an issue that appeared after SVN r2412.""" h = fits.Header() h['D2IM1.EXTVER'] = 1 assert h['D2IM1.EXTVER'] == 1.0 h['D2IM1.EXTVER'] = 2 assert h['D2IM1.EXTVER'] == 2.0 def test_raw_keyword_value(self): c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment") assert c.rawkeyword == 'DP1' assert c.rawvalue == 'NAXIS: 2' c = fits.Card('DP1.NAXIS', 2) assert c.rawkeyword == 'DP1' assert c.rawvalue == 'NAXIS: 2.0' c = fits.Card('DP1.NAXIS', 2.0) assert c.rawkeyword == 'DP1' assert c.rawvalue == 'NAXIS: 2.0' def test_rvkc_insert_after(self): """ It should be possible to insert a new RVKC after an existing one specified by the full keyword/field-specifier combination.""" self._test_header.set('DP1', 'AXIS.3: 1', 'a comment', after='DP1.AXIS.2') assert self._test_header[3] == 1 assert self._test_header['DP1.AXIS.3'] == 1 def test_rvkc_delete(self): """ Deleting a RVKC should work as with a normal card by using the full keyword/field-spcifier combination. """ del self._test_header['DP1.AXIS.1'] assert len(self._test_header) == 7 assert list(self._test_header)[0] == 'DP1.NAXIS' assert self._test_header[0] == 2 assert list(self._test_header)[1] == 'DP1.AXIS.2' # Perform a subsequent delete to make sure all the index mappings were # updated del self._test_header['DP1.AXIS.2'] assert len(self._test_header) == 6 assert list(self._test_header)[0] == 'DP1.NAXIS' assert self._test_header[0] == 2 assert list(self._test_header)[1] == 'DP1.NAUX' assert self._test_header[1] == 2 def test_pattern_matching_keys(self): """Test the keyword filter strings with RVKCs.""" cl = self._test_header['DP1.AXIS.*'] assert isinstance(cl, fits.Header) assert ([str(c).strip() for c in cl.cards] == ["DP1 = 'AXIS.1: 1'", "DP1 = 'AXIS.2: 2'"]) cl = self._test_header['DP1.N*'] assert ([str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'", "DP1 = 'NAUX: 2'"]) cl = self._test_header['DP1.AUX...'] assert ([str(c).strip() for c in cl.cards] == ["DP1 = 'AUX.1.COEFF.0: 0'", "DP1 = 'AUX.1.POWER.0: 1'", "DP1 = 'AUX.1.COEFF.1: 0.00048828125'", "DP1 = 'AUX.1.POWER.1: 1'"]) cl = self._test_header['DP?.NAXIS'] assert ([str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'"]) cl = self._test_header['DP1.A*S.*'] assert ([str(c).strip() for c in cl.cards] == ["DP1 = 'AXIS.1: 1'", "DP1 = 'AXIS.2: 2'"]) def test_pattern_matching_key_deletion(self): """Deletion by filter strings should work.""" del self._test_header['DP1.A*...'] assert len(self._test_header) == 2 assert list(self._test_header)[0] == 'DP1.NAXIS' assert self._test_header[0] == 2 assert list(self._test_header)[1] == 'DP1.NAUX' assert self._test_header[1] == 2 def test_successive_pattern_matching(self): """ A card list returned via a filter string should be further filterable. 
""" cl = self._test_header['DP1.A*...'] assert ([str(c).strip() for c in cl.cards] == ["DP1 = 'AXIS.1: 1'", "DP1 = 'AXIS.2: 2'", "DP1 = 'AUX.1.COEFF.0: 0'", "DP1 = 'AUX.1.POWER.0: 1'", "DP1 = 'AUX.1.COEFF.1: 0.00048828125'", "DP1 = 'AUX.1.POWER.1: 1'"]) cl2 = cl['*.*AUX...'] assert ([str(c).strip() for c in cl2.cards] == ["DP1 = 'AUX.1.COEFF.0: 0'", "DP1 = 'AUX.1.POWER.0: 1'", "DP1 = 'AUX.1.COEFF.1: 0.00048828125'", "DP1 = 'AUX.1.POWER.1: 1'"]) def test_rvkc_in_cardlist_keys(self): """ The CardList.keys() method should return full keyword/field-spec values for RVKCs. """ cl = self._test_header['DP1.AXIS.*'] assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2'] def test_rvkc_in_cardlist_values(self): """ The CardList.values() method should return the values of all RVKCs as floating point values. """ cl = self._test_header['DP1.AXIS.*'] assert list(cl.values()) == [1.0, 2.0] def test_rvkc_value_attribute(self): """ Individual card values should be accessible by the .value attribute (which should return a float). """ cl = self._test_header['DP1.AXIS.*'] assert cl.cards[0].value == 1.0 assert isinstance(cl.cards[0].value, float) def test_overly_permissive_parsing(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183 Ensures that cards with standard commentary keywords are never treated as RVKCs. Also ensures that cards not stricly matching the RVKC pattern are not treated as such. """ h = fits.Header() h['HISTORY'] = 'AXIS.1: 2' h['HISTORY'] = 'AXIS.2: 2' assert 'HISTORY.AXIS' not in h assert 'HISTORY.AXIS.1' not in h assert 'HISTORY.AXIS.2' not in h assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2'] # This is an example straight out of the ticket where everything after # the '2012' in the date value was being ignored, allowing the value to # successfully be parsed as a "float" h = fits.Header() h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061' assert 'HISTORY.Date' not in h assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061') c = fits.Card.fromstring( " 'Date: 2012-09-19T13:58:53.756061'") assert c.keyword == '' assert c.value == "'Date: 2012-09-19T13:58:53.756061'" assert c.field_specifier is None h = fits.Header() h['FOO'] = 'Date: 2012-09-19T13:58:53.756061' assert 'FOO.Date' not in h assert (str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'")) def test_overly_aggressive_rvkc_lookup(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184 Ensures that looking up a RVKC by keyword only (without the field-specifier) in a header returns the full string value of that card without parsing it as a RVKC. Also ensures that a full field-specifier is required to match a RVKC--a partial field-specifier that doesn't explicitly match any record-valued keyword should result in a KeyError. """ c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'") c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'") h = fits.Header([c1, c2]) assert h['FOO'] == 'AXIS.1: 2' assert h[('FOO', 1)] == 'AXIS.2: 4' assert h['FOO.AXIS.1'] == 2.0 assert h['FOO.AXIS.2'] == 4.0 assert 'FOO.AXIS' not in h assert 'FOO.AXIS.' not in h assert 'FOO.' not in h pytest.raises(KeyError, lambda: h['FOO.AXIS']) pytest.raises(KeyError, lambda: h['FOO.AXIS.']) pytest.raises(KeyError, lambda: h['FOO.']) def test_fitsheader_script(self): """Tests the basic functionality of the `fitsheader` script.""" from ....io.fits.scripts import fitsheader # Can an extension by specified by the EXTNAME keyword? 
hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits')) output = hf.parse(extensions=['AIPS FQ']) assert "EXTNAME = 'AIPS FQ" in output assert "BITPIX" in output # Can we limit the display to one specific keyword? output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME']) assert "EXTNAME = 'AIPS FQ" in output assert "BITPIX =" not in output assert len(output.split('\n')) == 3 # Can we limit the display to two specific keywords? output = hf.parse(extensions=[1], keywords=['EXTNAME', 'BITPIX']) assert "EXTNAME =" in output assert "BITPIX =" in output assert len(output.split('\n')) == 4 # Can we use wildcards for keywords? output = hf.parse(extensions=[1], keywords=['NAXIS*']) assert "NAXIS =" in output assert "NAXIS1 =" in output assert "NAXIS2 =" in output # Can an extension by specified by the EXTNAME+EXTVER keywords? hf = fitsheader.HeaderFormatter(self.data('test0.fits')) assert "EXTNAME = 'SCI" in hf.parse(extensions=['SCI,2']) # Can we print the original header before decompression? hf = fitsheader.HeaderFormatter(self.data('comp.fits')) assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1], compressed=False) assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1], compressed=True) def test_fitsheader_table_feature(self): """Tests the `--table` feature of the `fitsheader` script.""" from ....io import fits from ....io.fits.scripts import fitsheader test_filename = self.data('zerowidth.fits') fitsobj = fits.open(test_filename) formatter = fitsheader.TableHeaderFormatter(test_filename) # Does the table contain the expected number of rows? mytable = formatter.parse([0]) assert len(mytable) == len(fitsobj[0].header) # Repeat the above test when multiple HDUs are requested mytable = formatter.parse(extensions=['AIPS FQ', 2, "4"]) assert len(mytable) == (len(fitsobj['AIPS FQ'].header) + len(fitsobj[2].header) + len(fitsobj[4].header)) # Can we recover the filename and extension name from the table? mytable = formatter.parse(extensions=['AIPS FQ']) assert np.all(mytable['filename'] == test_filename) assert np.all(mytable['hdu'] == 'AIPS FQ') assert mytable['value'][mytable['keyword'] == "EXTNAME"] == "AIPS FQ" # Can we specify a single extension/keyword? mytable = formatter.parse(extensions=['AIPS FQ'], keywords=['EXTNAME']) assert len(mytable) == 1 assert mytable['hdu'][0] == "AIPS FQ" assert mytable['keyword'][0] == "EXTNAME" assert mytable['value'][0] == "AIPS FQ" # Is an incorrect extension dealt with gracefully? mytable = formatter.parse(extensions=['DOES_NOT_EXIST']) assert mytable is None # Is an incorrect keyword dealt with gracefully? mytable = formatter.parse(extensions=['AIPS FQ'], keywords=['DOES_NOT_EXIST']) assert mytable is None
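
# Illustrative sketch (not a regression test): the Header string round trip
# exercised throughout this module -- build a header, serialize it with
# ``tostring``, and parse it back with ``fromstring``.  Only calls already
# used by the tests above are assumed.
def _header_roundtrip_sketch():
    h = fits.Header()
    h['FOO'] = ('BAR', 'a comment')      # value plus comment
    h['NUM'] = 42
    s = h.tostring(sep='\n', endcard=False, padding=False)
    h2 = fits.Header.fromstring(s, sep='\n')
    assert h2['FOO'] == 'BAR'
    assert h2.comments['FOO'] == 'a comment'
    assert h2['NUM'] == 42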
3d5f7c090a12b3c06e0ee4ed9a3076ae0d6856ef8bd9a417a6e89c5873ec0d9e
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest import os from . import FitsTestCase from ..convenience import writeto from ..hdu import PrimaryHDU, hdulist from ..scripts import fitsdiff from ....tests.helper import catch_warnings from ....utils.exceptions import AstropyDeprecationWarning from ....version import version class TestFITSDiff_script(FitsTestCase): def test_noargs(self): with pytest.raises(SystemExit) as e: fitsdiff.main() assert e.value.code == 2 def test_oneargargs(self): with pytest.raises(SystemExit) as e: fitsdiff.main(["file1"]) assert e.value.code == 2 def test_nodiff(self): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main([tmp_a, tmp_b]) assert numdiff == 0 def test_onediff(self): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() b[1, 0] = 12 hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main([tmp_a, tmp_b]) assert numdiff == 1 def test_manydiff(self, capsys): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a + 1 hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main([tmp_a, tmp_b]) out, err = capsys.readouterr() assert numdiff == 1 assert out.splitlines()[-4:] == [ ' a> 9', ' b> 10', ' ...', ' 100 different pixels found (100.00% different).'] numdiff = fitsdiff.main(['-n', '1', tmp_a, tmp_b]) out, err = capsys.readouterr() assert numdiff == 1 assert out.splitlines()[-4:] == [ ' a> 0', ' b> 1', ' ...', ' 100 different pixels found (100.00% different).'] def test_outputfile(self): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() b[1, 0] = 12 hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main(['-o', self.temp('diff.txt'), tmp_a, tmp_b]) assert numdiff == 1 with open(self.temp('diff.txt')) as f: out = f.read() assert out.splitlines()[-4:] == [ ' Data differs at [1, 2]:', ' a> 10', ' b> 12', ' 1 different pixels found (1.00% different).'] def test_atol(self): a = np.arange(100, dtype=float).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() b[1, 0] = 11 hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main(["-a", "1", tmp_a, tmp_b]) assert numdiff == 0 numdiff = fitsdiff.main(["--exact", "-a", "1", tmp_a, tmp_b]) assert numdiff == 1 def test_rtol(self): a = np.arange(100, dtype=float).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() b[1, 0] = 11 hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main(["-r", "1e-1", tmp_a, tmp_b]) assert numdiff == 0 def test_rtol_diff(self, capsys): a = np.arange(100, dtype=float).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() b[1, 0] = 11 hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main(["-r", "1e-2", tmp_a, tmp_b]) assert numdiff == 1 out, err = capsys.readouterr() assert out == """ fitsdiff: {} a: {} b: {} 
Maximum number of different data values to be reported: 10 Relative tolerance: 0.01, Absolute tolerance: 0.0 Primary HDU:\n\n Data contains differences: Data differs at [1, 2]: a> 10.0 ? ^ b> 11.0 ? ^ 1 different pixels found (1.00% different).\n""".format(version, tmp_a, tmp_b) assert err == "" def test_fitsdiff_script_both_d_and_r(self, capsys): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) with catch_warnings(AstropyDeprecationWarning) as warning_lines: fitsdiff.main(["-r", "1e-4", "-d", "1e-2", tmp_a, tmp_b]) # `rtol` is always ignored when `tolerance` is provided assert warning_lines[0].category == AstropyDeprecationWarning assert (str(warning_lines[0].message) == '"-d" ("--difference-tolerance") was deprecated in version 2.0 ' 'and will be removed in a future version. ' 'Use "-r" ("--relative-tolerance") instead.') out, err = capsys.readouterr() assert out == """ fitsdiff: {} a: {} b: {} Maximum number of different data values to be reported: 10 Relative tolerance: 0.01, Absolute tolerance: 0.0 No differences found.\n""".format(version, tmp_a, tmp_b) def test_wildcard(self): tmp1 = self.temp("tmp_file1") with pytest.raises(SystemExit) as e: fitsdiff.main([tmp1+"*", "ACME"]) assert e.value.code == 2 def test_not_quiet(self, capsys): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main([tmp_a, tmp_b]) assert numdiff == 0 out, err = capsys.readouterr() assert out == """ fitsdiff: {} a: {} b: {} Maximum number of different data values to be reported: 10 Relative tolerance: 0.0, Absolute tolerance: 0.0 No differences found.\n""".format(version, tmp_a, tmp_b) assert err == "" def test_quiet(self, capsys): a = np.arange(100).reshape(10, 10) hdu_a = PrimaryHDU(data=a) b = a.copy() hdu_b = PrimaryHDU(data=b) tmp_a = self.temp('testa.fits') tmp_b = self.temp('testb.fits') hdu_a.writeto(tmp_a) hdu_b.writeto(tmp_b) numdiff = fitsdiff.main(["-q", tmp_a, tmp_b]) assert numdiff == 0 out, err = capsys.readouterr() assert out == "" assert err == "" def test_path(self, capsys): os.mkdir(self.temp('sub/')) tmp_b = self.temp('sub/ascii.fits') tmp_g = self.temp('sub/group.fits') tmp_h = self.data('group.fits') with hdulist.fitsopen(tmp_h) as hdu_b: hdu_b.writeto(tmp_g) writeto(tmp_b, np.arange(100).reshape(10, 10)) # one modified file and a directory assert fitsdiff.main(["-q", self.data_dir, tmp_b]) == 1 assert fitsdiff.main(["-q", tmp_b, self.data_dir]) == 1 # two directories tmp_d = self.temp('sub/') assert fitsdiff.main(["-q", self.data_dir, tmp_d]) == 1 assert fitsdiff.main(["-q", tmp_d, self.data_dir]) == 1 assert fitsdiff.main(["-q", self.data_dir, self.data_dir]) == 0 # no match tmp_c = self.data('arange.fits') fitsdiff.main([tmp_c, tmp_d]) out, err = capsys.readouterr() assert "'arange.fits' has no match in" in err # globbing assert fitsdiff.main(["-q", self.data_dir+'/*.fits', self.data_dir]) == 0 assert fitsdiff.main(["-q", self.data_dir+'/g*.fits', tmp_d]) == 0 # one file and a directory tmp_f = self.data('tb.fits') assert fitsdiff.main(["-q", tmp_f, self.data_dir]) == 0 assert fitsdiff.main(["-q", self.data_dir, tmp_f]) == 0
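
# Illustrative sketch (not part of the test suite above): calling the
# ``fitsdiff`` command-line entry point programmatically, the same way these
# tests do.  The temporary-directory handling is an assumption made for the
# sake of a self-contained example; the tests above use FitsTestCase.temp.
def _fitsdiff_sketch():
    import tempfile

    d = tempfile.mkdtemp()
    path_a = os.path.join(d, 'a.fits')
    path_b = os.path.join(d, 'b.fits')

    a = np.arange(100).reshape(10, 10)
    b = a.copy()
    b[1, 0] = 12                     # a single differing pixel

    PrimaryHDU(data=a).writeto(path_a)
    PrimaryHDU(data=b).writeto(path_b)

    # main() returns 0 when the files are identical and 1 when they differ
    assert fitsdiff.main([path_a, path_b]) == 1
    # ...and a large enough absolute tolerance hides the difference
    assert fitsdiff.main(['-a', '5', path_a, path_b]) == 0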
41a76640e92811434c37e8a0813f2a5e764414f6b96fe5665d2cbff176032f13
# Licensed under a 3-clause BSD style license - see PYFITS.rst import sys import numpy as np from ....io import fits from . import FitsTestCase def compare_arrays(arr1in, arr2in, verbose=False): """ Compare the values field-by-field in two sets of numpy arrays or recarrays. """ arr1 = arr1in.view(np.ndarray) arr2 = arr2in.view(np.ndarray) nfail = 0 for n2 in arr2.dtype.names: n1 = n2 if n1 not in arr1.dtype.names: n1 = n1.lower() if n1 not in arr1.dtype.names: n1 = n1.upper() if n1 not in arr1.dtype.names: raise ValueError('field name {} not found in array 1'.format(n2)) if verbose: sys.stdout.write(" testing field: '{}'\n".format(n2)) sys.stdout.write(' shape...........') if arr2[n2].shape != arr1[n1].shape: nfail += 1 if verbose: sys.stdout.write('shapes differ\n') else: if verbose: sys.stdout.write('OK\n') sys.stdout.write(' elements........') w, = np.where(arr1[n1].ravel() != arr2[n2].ravel()) if w.size > 0: nfail += 1 if verbose: sys.stdout.write( '\n {} elements in field {} differ\n'.format( w.size, n2)) else: if verbose: sys.stdout.write('OK\n') if nfail == 0: if verbose: sys.stdout.write('All tests passed\n') return True else: if verbose: sys.stdout.write('{} differences found\n'.format(nfail)) return False def get_test_data(verbose=False): st = np.zeros(3, [('f1', 'i4'), ('f2', 'S6'), ('f3', '>2f8')]) np.random.seed(35) st['f1'] = [1, 3, 5] st['f2'] = ['hello', 'world', 'byebye'] st['f3'] = np.random.random(st['f3'].shape) return st class TestStructured(FitsTestCase): def test_structured(self): fname = self.data('stddata.fits') data1, h1 = fits.getdata(fname, ext=1, header=True) data2, h2 = fits.getdata(fname, ext=2, header=True) st = get_test_data() outfile = self.temp('test.fits') fits.writeto(outfile, data1, overwrite=True) fits.append(outfile, data2) fits.append(outfile, st) assert st.dtype.isnative assert np.all(st['f1'] == [1, 3, 5]) data1check, h1check = fits.getdata(outfile, ext=1, header=True) data2check, h2check = fits.getdata(outfile, ext=2, header=True) stcheck, sthcheck = fits.getdata(outfile, ext=3, header=True) assert compare_arrays(data1, data1check, verbose=True) assert compare_arrays(data2, data2check, verbose=True) assert compare_arrays(st, stcheck, verbose=True) # try reading with view dataviewcheck, hviewcheck = fits.getdata(outfile, ext=2, header=True, view=np.ndarray) assert compare_arrays(data2, dataviewcheck, verbose=True)
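
# Illustrative sketch (not part of TestStructured): the write/read/compare
# pattern used above, reduced to its core.  The output path is an assumption;
# the real test writes into FitsTestCase's temporary directory instead.
def _structured_roundtrip_sketch(path='structured_sketch.fits'):
    st = get_test_data()
    fits.writeto(path, st, overwrite=True)
    st_back = fits.getdata(path, ext=1)
    assert compare_arrays(st, st_back, verbose=True)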
cecf7dad8290fb773cc217661e5ccb9b55c58bdd94a4c94856b2c1e0905aa37a
# Licensed under a 3-clause BSD style license - see PYFITS.rst import pytest import numpy as np from ....io import fits from ..compression import compress_hdu from . import FitsTestCase MAX_INT = np.iinfo(np.intc).max MAX_LONG = np.iinfo(np.long).max MAX_LONGLONG = np.iinfo(np.longlong).max class TestCompressionFunction(FitsTestCase): def test_wrong_argument_number(self): with pytest.raises(TypeError): compress_hdu(1, 2) def test_unknown_compression_type(self): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header['ZCMPTYPE'] = 'fun' with pytest.raises(ValueError) as exc: compress_hdu(hdu) assert 'Unrecognized compression type: fun' in str(exc) def test_zbitpix_unknown(self): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header['ZBITPIX'] = 13 with pytest.raises(ValueError) as exc: compress_hdu(hdu) assert 'Invalid value for BITPIX: 13' in str(exc) def test_data_none(self): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu.data = None with pytest.raises(TypeError) as exc: compress_hdu(hdu) assert 'CompImageHDU.data must be a numpy.ndarray' in str(exc) def test_missing_internal_header(self): hdu = fits.CompImageHDU(np.ones((10, 10))) del hdu._header with pytest.raises(AttributeError) as exc: compress_hdu(hdu) assert '_header' in str(exc) def test_invalid_tform(self): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header['TFORM1'] = 'TX' with pytest.raises(RuntimeError) as exc: compress_hdu(hdu) assert 'TX' in str(exc) and 'TFORM' in str(exc) def test_invalid_zdither(self): hdu = fits.CompImageHDU(np.ones((10, 10)), quantize_method=1) hdu._header['ZDITHER0'] = 'a' with pytest.raises(TypeError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['ZNAXIS', 'ZBITPIX']) def test_header_missing_keyword(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) del hdu._header[kw] with pytest.raises(KeyError) as exc: compress_hdu(hdu) assert kw in str(exc) @pytest.mark.parametrize('kw', ['ZNAXIS', 'ZVAL1', 'ZVAL2', 'ZBLANK', 'BLANK']) def test_header_value_int_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = MAX_INT + 1 with pytest.raises(OverflowError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['ZTILE1', 'ZNAXIS1']) def test_header_value_long_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = MAX_LONG + 1 with pytest.raises(OverflowError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['NAXIS1', 'NAXIS2', 'TNULL1', 'PCOUNT', 'THEAP']) def test_header_value_longlong_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = MAX_LONGLONG + 1 with pytest.raises(OverflowError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['ZVAL3']) def test_header_value_float_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = 1e300 with pytest.raises(OverflowError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['NAXIS1', 'NAXIS2', 'TFIELDS', 'PCOUNT']) def test_header_value_negative(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = -1 with pytest.raises(ValueError) as exc: compress_hdu(hdu) assert '{} should not be negative.'.format(kw) in str(exc) @pytest.mark.parametrize( ('kw', 'limit'), [('ZNAXIS', 999), ('TFIELDS', 999)]) def test_header_value_exceeds_custom_limit(self, kw, limit): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = limit + 1 with pytest.raises(ValueError) as exc: compress_hdu(hdu) assert kw in str(exc) @pytest.mark.parametrize('kw', ['TTYPE1', 'TFORM1', 'ZCMPTYPE', 'ZNAME1', 'ZQUANTIZ']) def test_header_value_no_string(self, kw): hdu = 
fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = 1 with pytest.raises(TypeError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['TZERO1', 'TSCAL1']) def test_header_value_no_double(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu._header[kw] = '1' with pytest.raises(TypeError): compress_hdu(hdu) @pytest.mark.parametrize('kw', ['ZSCALE', 'ZZERO']) def test_header_value_no_double_int_image(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10), dtype=np.int32)) hdu._header[kw] = '1' with pytest.raises(TypeError): compress_hdu(hdu)
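
# Illustrative sketch (not one of the parametrized cases above): the pattern
# every test in this class follows -- build a valid CompImageHDU, corrupt a
# single keyword of its internal compression header, and check that
# ``compress_hdu`` rejects it.  The corrupted keyword mirrors
# test_zbitpix_unknown.
def _corrupt_compression_header_sketch():
    hdu = fits.CompImageHDU(np.ones((10, 10)))
    hdu._header['ZBITPIX'] = 13          # not a legal BITPIX value
    with pytest.raises(ValueError):
        compress_hdu(hdu)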
4f46b188f4a96a9a67cc64681f8557fa3544d9040be81d219e448f3cddee9ff4
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest from . import FitsTestCase from ..scripts import fitsheader class TestFITSheader_script(FitsTestCase): def test_noargs(self): with pytest.raises(SystemExit) as e: fitsheader.main(['-h']) assert e.value.code == 0 def test_file_exists(self, capsys): fitsheader.main([self.data('arange.fits')]) out, err = capsys.readouterr() assert out.splitlines()[1].startswith( 'SIMPLE = T / conforms to FITS standard') assert err == '' def test_by_keyword(self, capsys): fitsheader.main(['-k', 'NAXIS', self.data('arange.fits')]) out, err = capsys.readouterr() assert out.splitlines()[1].startswith( 'NAXIS = 3 / number of array dimensions') fitsheader.main(['-k', 'NAXIS*', self.data('arange.fits')]) out, err = capsys.readouterr() out = out.splitlines() assert len(out) == 5 assert out[1].startswith('NAXIS') assert out[2].startswith('NAXIS1') assert out[3].startswith('NAXIS2') assert out[4].startswith('NAXIS3') fitsheader.main(['-k', 'RANDOMKEY', self.data('arange.fits')]) out, err = capsys.readouterr() assert err.startswith('WARNING') and 'RANDOMKEY' in err assert not err.startswith('ERROR') def test_by_extension(self, capsys): fitsheader.main(['-e', '1', self.data('test0.fits')]) out, err = capsys.readouterr() assert len(out.splitlines()) == 62 fitsheader.main(['-e', '3', '-k', 'BACKGRND', self.data('test0.fits')]) out, err = capsys.readouterr() assert out.splitlines()[1].startswith('BACKGRND= 312.') fitsheader.main(['-e', '0', '-k', 'BACKGRND', self.data('test0.fits')]) out, err = capsys.readouterr() assert err.startswith('WARNING') fitsheader.main(['-e', '3', '-k', 'FOO', self.data('test0.fits')]) out, err = capsys.readouterr() assert err.startswith('WARNING') def test_table(self, capsys): fitsheader.main(['-t', '-k', 'BACKGRND', self.data('test0.fits')]) out, err = capsys.readouterr() out = out.splitlines() assert len(out) == 5 assert out[1].endswith('| 1 | BACKGRND | 316.0 |') assert out[2].endswith('| 2 | BACKGRND | 351.0 |') assert out[3].endswith('| 3 | BACKGRND | 312.0 |') assert out[4].endswith('| 4 | BACKGRND | 323.0 |') fitsheader.main(['-t', '-e', '0', '-k', 'NAXIS', self.data('arange.fits'), self.data('ascii.fits'), self.data('blank.fits')]) out, err = capsys.readouterr() out = out.splitlines() assert len(out) == 4 assert out[1].endswith('| 0 | NAXIS | 3 |') assert out[2].endswith('| 0 | NAXIS | 0 |') assert out[3].endswith('| 0 | NAXIS | 2 |')
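
# Illustrative sketch (not part of the tests above): invoking the fitsheader
# entry point directly on a file, as these tests do.  The path is an
# assumption; any FITS file on disk would work.
def _fitsheader_sketch(path='example.fits'):
    # Print only the NAXIS* keywords from every HDU of ``path``.
    fitsheader.main(['-k', 'NAXIS*', path])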
57efe26b033b908e3745460dc3c2d8cae64353d15f2fc660e66cb31351145f00
import os import gc import pathlib import warnings import pytest import numpy as np from numpy.testing import assert_allclose from .. import HDUList, PrimaryHDU, BinTableHDU from ... import fits from .... import units as u from ....table import Table, QTable, NdarrayMixin, Column from ....tests.helper import catch_warnings from ....units.format.fits import UnitScaleError from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation from ....time import Time, TimeDelta from ....tests.helper import quantity_allclose from ....units.quantity import QuantityInfo try: import yaml # pylint: disable=W0611 HAS_YAML = True except ImportError: HAS_YAML = False DATA = os.path.join(os.path.dirname(__file__), 'data') def equal_data(a, b): for name in a.dtype.names: if not np.all(a[name] == b[name]): return False return True class TestSingleTable: def setup_class(self): self.data = np.array(list(zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'], [2.3, 4.5, 6.7, 8.9])), dtype=[(str('a'), int), (str('b'), str('U1')), (str('c'), float)]) def test_simple(self, tmpdir): filename = str(tmpdir.join('test_simple.fts')) t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) def test_simple_pathlib(self, tmpdir): filename = pathlib.Path(str(tmpdir.join('test_simple.fit'))) t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) def test_simple_meta(self, tmpdir): filename = str(tmpdir.join('test_simple.fits')) t1 = Table(self.data) t1.meta['A'] = 1 t1.meta['B'] = 2.3 t1.meta['C'] = 'spam' t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment'] t1.meta['HISTORY'] = ['first', 'second', 'third'] t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) for key in t1.meta: if isinstance(t1.meta, list): for i in range(len(t1.meta[key])): assert t1.meta[key][i] == t2.meta[key][i] else: assert t1.meta[key] == t2.meta[key] def test_simple_meta_conflicting(self, tmpdir): filename = str(tmpdir.join('test_simple.fits')) t1 = Table(self.data) t1.meta['ttype1'] = 'spam' with catch_warnings() as l: t1.write(filename, overwrite=True) assert len(l) == 1 assert str(l[0].message).startswith( 'Meta-data keyword ttype1 will be ignored since it conflicts with a FITS reserved keyword') def test_simple_noextension(self, tmpdir): """ Test that file type is recognized without extension """ filename = str(tmpdir.join('test_simple')) t1 = Table(self.data) t1.write(filename, overwrite=True, format='fits') t2 = Table.read(filename) assert equal_data(t1, t2) @pytest.mark.parametrize('table_type', (Table, QTable)) def test_with_units(self, table_type, tmpdir): filename = str(tmpdir.join('test_with_units.fits')) t1 = table_type(self.data) t1['a'].unit = u.m t1['c'].unit = u.km / u.s t1.write(filename, overwrite=True) t2 = table_type.read(filename) assert equal_data(t1, t2) assert t2['a'].unit == u.m assert t2['c'].unit == u.km / u.s def test_masked(self, tmpdir): filename = str(tmpdir.join('test_masked.fits')) t1 = Table(self.data, masked=True) t1.mask['a'] = [1, 0, 1, 0] t1.mask['b'] = [1, 0, 0, 1] t1.mask['c'] = [0, 1, 1, 0] t1.write(filename, overwrite=True) t2 = Table.read(filename) assert t2.masked assert equal_data(t1, t2) assert np.all(t1['a'].mask == t2['a'].mask) # Disabled for now, as there is no obvious way to handle masking of # non-integer columns in FITS # TODO: Re-enable these tests if some workaround for this can be found # assert np.all(t1['b'].mask == t2['b'].mask) # assert 
np.all(t1['c'].mask == t2['c'].mask) def test_masked_nan(self, tmpdir): filename = str(tmpdir.join('test_masked_nan.fits')) data = np.array(list(zip([5.2, 8.4, 3.9, 6.3], [2.3, 4.5, 6.7, 8.9])), dtype=[(str('a'), np.float64), (str('b'), np.float32)]) t1 = Table(data, masked=True) t1.mask['a'] = [1, 0, 1, 0] t1.mask['b'] = [1, 0, 0, 1] t1.write(filename, overwrite=True) t2 = Table.read(filename) np.testing.assert_array_almost_equal(t2['a'], [np.nan, 8.4, np.nan, 6.3]) np.testing.assert_array_almost_equal(t2['b'], [np.nan, 4.5, 6.7, np.nan]) # assert t2.masked # t2.masked = false currently, as the only way to determine whether a table is masked # while reading is to check whether col.null is present. For float columns, col.null # is not initialized def test_read_from_fileobj(self, tmpdir): filename = str(tmpdir.join('test_read_from_fileobj.fits')) hdu = BinTableHDU(self.data) hdu.writeto(filename, overwrite=True) with open(filename, 'rb') as f: t = Table.read(f) assert equal_data(t, self.data) def test_read_with_nonstandard_units(self): hdu = BinTableHDU(self.data) hdu.columns[0].unit = 'RADIANS' hdu.columns[1].unit = 'spam' hdu.columns[2].unit = 'millieggs' t = Table.read(hdu) assert equal_data(t, self.data) def test_memmap(self, tmpdir): filename = str(tmpdir.join('test_simple.fts')) t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename, memmap=False) t3 = Table.read(filename, memmap=True) assert equal_data(t2, t3) # To avoid issues with --open-files, we need to remove references to # data that uses memory mapping and force the garbage collection del t1, t2, t3 gc.collect() @pytest.mark.parametrize('memmap', (False, True)) def test_character_as_bytes(self, tmpdir, memmap): filename = str(tmpdir.join('test_simple.fts')) t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename, character_as_bytes=False, memmap=memmap) t3 = Table.read(filename, character_as_bytes=True, memmap=memmap) assert t2['b'].dtype.kind == 'U' assert t3['b'].dtype.kind == 'S' assert equal_data(t2, t3) # To avoid issues with --open-files, we need to remove references to # data that uses memory mapping and force the garbage collection del t1, t2, t3 gc.collect() class TestMultipleHDU: def setup_class(self): self.data1 = np.array(list(zip([1, 2, 3, 4], ['a', 'b', 'c', 'd'], [2.3, 4.5, 6.7, 8.9])), dtype=[(str('a'), int), (str('b'), str('U1')), (str('c'), float)]) self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7], [2.3, 4.5, 6.7, 8.9])), dtype=[(str('p'), float), (str('q'), float)]) hdu1 = PrimaryHDU() hdu2 = BinTableHDU(self.data1, name='first') hdu3 = BinTableHDU(self.data2, name='second') self.hdus = HDUList([hdu1, hdu2, hdu3]) def teardown_class(self): del self.hdus def setup_method(self, method): warnings.filterwarnings('always') def test_read(self, tmpdir): filename = str(tmpdir.join('test_read.fits')) self.hdus.writeto(filename) with catch_warnings() as l: t = Table.read(filename) assert len(l) == 1 assert str(l[0].message).startswith( 'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)') assert equal_data(t, self.data1) def test_read_with_hdu_0(self, tmpdir): filename = str(tmpdir.join('test_read_with_hdu_0.fits')) self.hdus.writeto(filename) with pytest.raises(ValueError) as exc: Table.read(filename, hdu=0) assert exc.value.args[0] == 'No table found in hdu=0' @pytest.mark.parametrize('hdu', [1, 'first']) def test_read_with_hdu_1(self, tmpdir, hdu): filename = str(tmpdir.join('test_read_with_hdu_1.fits')) 
        self.hdus.writeto(filename)
        with catch_warnings() as l:
            t = Table.read(filename, hdu=hdu)
            assert len(l) == 0
        assert equal_data(t, self.data1)

    @pytest.mark.parametrize('hdu', [2, 'second'])
    def test_read_with_hdu_2(self, tmpdir, hdu):
        filename = str(tmpdir.join('test_read_with_hdu_2.fits'))
        self.hdus.writeto(filename)
        with catch_warnings() as l:
            t = Table.read(filename, hdu=hdu)
            assert len(l) == 0
        assert equal_data(t, self.data2)

    def test_read_from_hdulist(self):
        with catch_warnings() as l:
            t = Table.read(self.hdus)
            assert len(l) == 1
            assert str(l[0].message).startswith(
                'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')
        assert equal_data(t, self.data1)

    def test_read_from_hdulist_with_hdu_0(self, tmpdir):
        with pytest.raises(ValueError) as exc:
            Table.read(self.hdus, hdu=0)
        assert exc.value.args[0] == 'No table found in hdu=0'

    @pytest.mark.parametrize('hdu', [1, 'first'])
    def test_read_from_hdulist_with_hdu_1(self, tmpdir, hdu):
        with catch_warnings() as l:
            t = Table.read(self.hdus, hdu=hdu)
            assert len(l) == 0
        assert equal_data(t, self.data1)

    @pytest.mark.parametrize('hdu', [2, 'second'])
    def test_read_from_hdulist_with_hdu_2(self, tmpdir, hdu):
        with catch_warnings() as l:
            t = Table.read(self.hdus, hdu=hdu)
            assert len(l) == 0
        assert equal_data(t, self.data2)

    def test_read_from_single_hdu(self):
        with catch_warnings() as l:
            t = Table.read(self.hdus[1])
            assert len(l) == 0
        assert equal_data(t, self.data1)


def test_masking_regression_1795():
    """
    Regression test for #1795 - this bug originally caused columns where TNULL
    was not defined to have their first element masked.
    """
    t = Table.read(os.path.join(DATA, 'tb.fits'))
    assert np.all(t['c1'].mask == np.array([False, False]))
    assert np.all(t['c2'].mask == np.array([False, False]))
    assert np.all(t['c3'].mask == np.array([False, False]))
    assert np.all(t['c4'].mask == np.array([False, False]))
    assert np.all(t['c1'].data == np.array([1, 2]))
    assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))
    assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))
    assert np.all(t['c4'].data == np.array([False, True]))


def test_scale_error():
    a = [1, 4, 5]
    b = [2.0, 5.0, 8.2]
    c = ['x', 'y', 'z']
    t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
    t['a'].unit = '1.2'
    with pytest.raises(UnitScaleError) as exc:
        t.write('t.fits', format='fits', overwrite=True)
    assert exc.value.args[0] == "The column 'a' could not be stored in FITS format because it has a scale '(1.2)' that is not recognized by the FITS standard. Either scale the data or change the units."


def test_bool_column(tmpdir):
    """
    Regression test for https://github.com/astropy/astropy/issues/1953

    Ensures that Table columns of bools are properly written to a FITS table.
    """

    arr = np.ones(5, dtype=bool)
    arr[::2] = False

    t = Table([arr])
    t.write(str(tmpdir.join('test.fits')), overwrite=True)

    with fits.open(str(tmpdir.join('test.fits'))) as hdul:
        assert hdul[1].data['col0'].dtype == np.dtype('bool')
        assert np.all(hdul[1].data['col0'] == arr)


def test_unicode_column(tmpdir):
    """
    Test that a column of unicode strings is still written as one
    byte-per-character in the FITS table (so long as the column can be ASCII
    encoded).
Regression test for one of the issues fixed in https://github.com/astropy/astropy/pull/4228 """ t = Table([np.array([u'a', u'b', u'cd'])]) t.write(str(tmpdir.join('test.fits')), overwrite=True) with fits.open(str(tmpdir.join('test.fits'))) as hdul: assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd']) assert hdul[1].header['TFORM1'] == '2A' t2 = Table([np.array([u'\N{SNOWMAN}'])]) with pytest.raises(UnicodeEncodeError): t2.write(str(tmpdir.join('test.fits')), overwrite=True) def test_unit_warnings_read_write(tmpdir): filename = str(tmpdir.join('test_unit.fits')) t1 = Table([[1, 2], [3, 4]], names=['a', 'b']) t1['a'].unit = 'm/s' t1['b'].unit = 'not-a-unit' with catch_warnings() as l: t1.write(filename, overwrite=True) assert len(l) == 1 assert str(l[0].message).startswith("'not-a-unit' did not parse as fits unit") with catch_warnings() as l: Table.read(filename, hdu=1) assert len(l) == 0 def test_convert_comment_convention(tmpdir): """ Regression test for https://github.com/astropy/astropy/issues/6079 """ filename = os.path.join(DATA, 'stddata.fits') t = Table.read(filename) assert t.meta['comments'] == [ '', ' *** End of mandatory fields ***', '', '', ' *** Column names ***', '', '', ' *** Column formats ***', '' ] def assert_objects_equal(obj1, obj2, attrs, compare_class=True): if compare_class: assert obj1.__class__ is obj2.__class__ info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta'] for attr in attrs + info_attrs: a1 = obj1 a2 = obj2 for subattr in attr.split('.'): try: a1 = getattr(a1, subattr) a2 = getattr(a2, subattr) except AttributeError: a1 = a1[subattr] a2 = a2[subattr] # Mixin info.meta can None instead of empty OrderedDict(), #6720 would # fix this. if attr == 'info.meta': if a1 is None: a1 = {} if a2 is None: a2 = {} assert np.all(a1 == a2) # Testing FITS table read/write with mixins. This is mostly # copied from ECSV mixin testing. el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km) el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5') scc = sc.copy() scc.representation = 'cartesian' tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el) mixin_cols = { 'tm': tm, 'dt': TimeDelta([1, 2] * u.day), 'sc': sc, 'scc': scc, 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4', obstime=['J1990.5', 'J1991.5']), 'q': [1, 2] * u.m, 'lat': Latitude([1, 2] * u.deg), 'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg), 'ang': Angle([1, 2] * u.deg), 'el2': el2, } time_attrs = ['value', 'shape', 'format', 'scale', 'location'] compare_attrs = { 'c1': ['data'], 'c2': ['data'], 'tm': time_attrs, 'dt': ['shape', 'value', 'format', 'scale'], 'sc': ['ra', 'dec', 'representation', 'frame.name'], 'scc': ['x', 'y', 'z', 'representation', 'frame.name'], 'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'], 'q': ['value', 'unit'], 'lon': ['value', 'unit', 'wrap_angle'], 'lat': ['value', 'unit'], 'ang': ['value', 'unit'], 'el2': ['x', 'y', 'z', 'ellipsoid'], 'nd': ['x', 'y', 'z'], } @pytest.mark.skipif('not HAS_YAML') def test_fits_mixins_qtable_to_table(tmpdir): """Test writing as QTable and reading as Table. Ensure correct classes come out. 
""" filename = str(tmpdir.join('test_simple.fits')) names = sorted(mixin_cols) t = QTable([mixin_cols[name] for name in names], names=names) t.write(filename, format='fits') t2 = Table.read(filename, format='fits', astropy_native=True) assert t.colnames == t2.colnames for name, col in t.columns.items(): col2 = t2[name] # Special-case Time, which does not yet support round-tripping # the format. if isinstance(col2, Time): col2.format = col.format attrs = compare_attrs[name] compare_class = True if isinstance(col.info, QuantityInfo): # Downgrade Quantity to Column + unit assert type(col2) is Column # Class-specific attributes like `value` or `wrap_angle` are lost. attrs = ['unit'] compare_class = False # Compare data values here (assert_objects_equal doesn't know how in this case) assert np.all(col.value == col2) assert_objects_equal(col, col2, attrs, compare_class) @pytest.mark.skipif('not HAS_YAML') @pytest.mark.parametrize('table_cls', (Table, QTable)) def test_fits_mixins_as_one(table_cls, tmpdir): """Test write/read all cols at once and validate intermediate column names""" filename = str(tmpdir.join('test_simple.fits')) names = sorted(mixin_cols) serialized_names = ['ang', 'dt.jd1', 'dt.jd2', 'el2.x', 'el2.y', 'el2.z', 'lat', 'lon', 'q', 'sc.ra', 'sc.dec', 'scc.x', 'scc.y', 'scc.z', 'scd.ra', 'scd.dec', 'scd.distance', 'scd.obstime.jd1', 'scd.obstime.jd2', 'tm', # serialize_method is formatted_value ] t = table_cls([mixin_cols[name] for name in names], names=names) t.meta['C'] = 'spam' t.meta['comments'] = ['this', 'is', 'a', 'comment'] t.meta['history'] = ['first', 'second', 'third'] t.write(filename, format="fits") t2 = table_cls.read(filename, format='fits', astropy_native=True) assert t2.meta['C'] == 'spam' assert t2.meta['comments'] == ['this', 'is', 'a', 'comment'] assert t2.meta['HISTORY'] == ['first', 'second', 'third'] assert t.colnames == t2.colnames # Read directly via fits and confirm column names hdus = fits.open(filename) assert hdus[1].columns.names == serialized_names @pytest.mark.skipif('not HAS_YAML') @pytest.mark.parametrize('name_col', list(mixin_cols.items())) @pytest.mark.parametrize('table_cls', (Table, QTable)) def test_fits_mixins_per_column(table_cls, name_col, tmpdir): """Test write/read one col at a time and do detailed validation""" filename = str(tmpdir.join('test_simple.fits')) name, col = name_col c = [1.0, 2.0] t = table_cls([c, col, c], names=['c1', name, 'c2']) t[name].info.description = 'my description' t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}} if not t.has_mixin_columns: pytest.skip('column is not a mixin (e.g. 
Quantity subclass in Table)') if isinstance(t[name], NdarrayMixin): pytest.xfail('NdarrayMixin not supported') t.write(filename, format="fits") t2 = table_cls.read(filename, format='fits', astropy_native=True) assert t.colnames == t2.colnames for colname in t.colnames: assert_objects_equal(t[colname], t2[colname], compare_attrs[colname]) # Special case to make sure Column type doesn't leak into Time class data if name.startswith('tm'): assert t2[name]._time.jd1.__class__ is np.ndarray assert t2[name]._time.jd2.__class__ is np.ndarray @pytest.mark.skipif('HAS_YAML') def test_warn_for_dropped_info_attributes(tmpdir): filename = str(tmpdir.join('test.fits')) t = Table([[1, 2]]) t['col0'].info.description = 'hello' with catch_warnings() as warns: t.write(filename, overwrite=True) assert len(warns) == 1 assert str(warns[0].message).startswith( "table contains column(s) with defined 'format'") @pytest.mark.skipif('HAS_YAML') def test_error_for_mixins_but_no_yaml(tmpdir): filename = str(tmpdir.join('test.fits')) t = Table([mixin_cols['sc']]) with pytest.raises(TypeError) as err: t.write(filename) assert "cannot write type SkyCoord column 'col0' to FITS without PyYAML" in str(err) @pytest.mark.skipif('not HAS_YAML') def test_info_attributes_with_no_mixins(tmpdir): """Even if there are no mixin columns, if there is metadata that would be lost it still gets serialized """ filename = str(tmpdir.join('test.fits')) t = Table([[1.0, 2.0]]) t['col0'].description = 'hello' * 40 t['col0'].format = '%8.4f' t['col0'].meta['a'] = {'b': 'c'} t.write(filename, overwrite=True) t2 = Table.read(filename) assert t2['col0'].description == 'hello' * 40 assert t2['col0'].format == '%8.4f' assert t2['col0'].meta['a'] == {'b': 'c'}
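# Illustrative sketch (not part of the original test suite): the basic
# mixin round-trip pattern exercised by the tests above, shown in isolation.
# The file name is an arbitrary placeholder; PyYAML is assumed to be
# available, as in the HAS_YAML-guarded tests.
def _sketch_mixin_roundtrip(tmpdir):
    filename = str(tmpdir.join('mixin_sketch.fits'))
    # Write a QTable with a Quantity (mixin) column to FITS ...
    t = QTable([[1, 2] * u.m], names=['q'])
    t.write(filename, format='fits')
    # ... and read it back; with QTable the unit is restored on the column.
    t2 = QTable.read(filename, format='fits')
    assert np.all(t2['q'] == t['q'])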
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import bz2 import io import mmap import os import pathlib import warnings import zipfile import pytest import numpy as np from . import FitsTestCase from ..convenience import _getext from ..diff import FITSDiff from ..file import _File, GZIP_MAGIC from ....io import fits from ....tests.helper import raises, catch_warnings, ignore_warnings from ....utils.data import conf, get_pkg_data_filename from ....utils import data class TestCore(FitsTestCase): def test_with_statement(self): with fits.open(self.data('ascii.fits')) as f: pass @raises(OSError) def test_missing_file(self): fits.open(self.temp('does-not-exist.fits')) def test_filename_is_bytes_object(self): with pytest.raises(TypeError): fits.open(self.data('ascii.fits').encode()) def test_naxisj_check(self): hdulist = fits.open(self.data('o4sp040b0_raw.fits')) hdulist[1].header['NAXIS3'] = 500 assert 'NAXIS3' in hdulist[1].header hdulist.verify('silentfix') assert 'NAXIS3' not in hdulist[1].header def test_byteswap(self): p = fits.PrimaryHDU() l = fits.HDUList() n = np.zeros(3, dtype='i2') n[0] = 1 n[1] = 60000 n[2] = 2 c = fits.Column(name='foo', format='i2', bscale=1, bzero=32768, array=n) t = fits.BinTableHDU.from_columns([c]) l.append(p) l.append(t) l.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as p: assert p[1].data[1]['foo'] == 60000.0 def test_fits_file_path_object(self): """ Testing when fits file is passed as pathlib.Path object #4412. """ fpath = pathlib.Path(get_pkg_data_filename('data/tdim.fits')) hdulist = fits.open(fpath) assert hdulist[0].filebytes() == 2880 assert hdulist[1].filebytes() == 5760 hdulist2 = fits.open(self.data('tdim.fits')) assert FITSDiff(hdulist2, hdulist).identical is True def test_add_del_columns(self): p = fits.ColDefs([]) p.add_col(fits.Column(name='FOO', format='3J')) p.add_col(fits.Column(name='BAR', format='1I')) assert p.names == ['FOO', 'BAR'] p.del_col('FOO') assert p.names == ['BAR'] def test_add_del_columns2(self): hdulist = fits.open(self.data('tb.fits')) table = hdulist[1] assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4') assert table.columns.names == ['c1', 'c2', 'c3', 'c4'] table.columns.del_col(str('c1')) assert table.data.dtype.names == ('c2', 'c3', 'c4') assert table.columns.names == ['c2', 'c3', 'c4'] table.columns.del_col(str('c3')) assert table.data.dtype.names == ('c2', 'c4') assert table.columns.names == ['c2', 'c4'] table.columns.add_col(fits.Column(str('foo'), str('3J'))) assert table.data.dtype.names == ('c2', 'c4', 'foo') assert table.columns.names == ['c2', 'c4', 'foo'] hdulist.writeto(self.temp('test.fits'), overwrite=True) with ignore_warnings(): # TODO: The warning raised by this test is actually indication of a # bug and should *not* be ignored. But as it is a known issue we # hide it for now. See # https://github.com/spacetelescope/PyFITS/issues/44 with fits.open(self.temp('test.fits')) as hdulist: table = hdulist[1] assert table.data.dtype.names == ('c2', 'c4', 'foo') assert table.columns.names == ['c2', 'c4', 'foo'] def test_update_header_card(self): """A very basic test for the Header.update method--I'd like to add a few more cases to this at some point. 
""" header = fits.Header() comment = 'number of bits per data pixel' header['BITPIX'] = (16, comment) assert 'BITPIX' in header assert header['BITPIX'] == 16 assert header.comments['BITPIX'] == comment header.update(BITPIX=32) assert header['BITPIX'] == 32 assert header.comments['BITPIX'] == '' def test_set_card_value(self): """Similar to test_update_header_card(), but tests the the `header['FOO'] = 'bar'` method of updating card values. """ header = fits.Header() comment = 'number of bits per data pixel' card = fits.Card.fromstring('BITPIX = 32 / {}'.format(comment)) header.append(card) header['BITPIX'] = 32 assert 'BITPIX' in header assert header['BITPIX'] == 32 assert header.cards[0].keyword == 'BITPIX' assert header.cards[0].value == 32 assert header.cards[0].comment == comment def test_uint(self): hdulist_f = fits.open(self.data('o4sp040b0_raw.fits'), uint=False) hdulist_i = fits.open(self.data('o4sp040b0_raw.fits'), uint=True) assert hdulist_f[1].data.dtype == np.float32 assert hdulist_i[1].data.dtype == np.uint16 assert np.all(hdulist_f[1].data == hdulist_i[1].data) def test_fix_missing_card_append(self): hdu = fits.ImageHDU() errs = hdu.req_cards('TESTKW', None, None, 'foo', 'silentfix', []) assert len(errs) == 1 assert 'TESTKW' in hdu.header assert hdu.header['TESTKW'] == 'foo' assert hdu.header.cards[-1].keyword == 'TESTKW' def test_fix_invalid_keyword_value(self): hdu = fits.ImageHDU() hdu.header['TESTKW'] = 'foo' errs = hdu.req_cards('TESTKW', None, lambda v: v == 'foo', 'foo', 'ignore', []) assert len(errs) == 0 # Now try a test that will fail, and ensure that an error will be # raised in 'exception' mode errs = hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'exception', []) assert len(errs) == 1 assert errs[0][1] == "'TESTKW' card has invalid value 'foo'." # See if fixing will work hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'silentfix', []) assert hdu.header['TESTKW'] == 'bar' @raises(fits.VerifyError) def test_unfixable_missing_card(self): class TestHDU(fits.hdu.base.NonstandardExtHDU): def _verify(self, option='warn'): errs = super()._verify(option) hdu.req_cards('TESTKW', None, None, None, 'fix', errs) return errs @classmethod def match_header(cls, header): # Since creating this HDU class adds it to the registry we # don't want the file reader to possibly think any actual # HDU from a file should be handled by this class return False hdu = TestHDU(header=fits.Header()) hdu.verify('fix') @raises(fits.VerifyError) def test_exception_on_verification_error(self): hdu = fits.ImageHDU() del hdu.header['XTENSION'] hdu.verify('exception') def test_ignore_verification_error(self): hdu = fits.ImageHDU() # The default here would be to issue a warning; ensure that no warnings # or exceptions are raised with catch_warnings(): warnings.simplefilter('error') del hdu.header['NAXIS'] try: hdu.verify('ignore') except Exception as exc: self.fail('An exception occurred when the verification error ' 'should have been ignored: {}'.format(exc)) # Make sure the error wasn't fixed either, silently or otherwise assert 'NAXIS' not in hdu.header @raises(ValueError) def test_unrecognized_verify_option(self): hdu = fits.ImageHDU() hdu.verify('foobarbaz') def test_errlist_basic(self): # Just some tests to make sure that _ErrList is setup correctly. # No arguments error_list = fits.verify._ErrList() assert error_list == [] # Some contents - this is not actually working, it just makes sure they # are kept. 
error_list = fits.verify._ErrList([1, 2, 3]) assert error_list == [1, 2, 3] def test_combined_verify_options(self): """ Test verify options like fix+ignore. """ def make_invalid_hdu(): hdu = fits.ImageHDU() # Add one keyword to the header that contains a fixable defect, and one # with an unfixable defect c1 = fits.Card.fromstring("test = ' test'") c2 = fits.Card.fromstring("P.I. = ' Hubble'") hdu.header.append(c1) hdu.header.append(c2) return hdu # silentfix+ignore should be completely silent hdu = make_invalid_hdu() with catch_warnings(): warnings.simplefilter('error') try: hdu.verify('silentfix+ignore') except Exception as exc: self.fail('An exception occurred when the verification error ' 'should have been ignored: {}'.format(exc)) # silentfix+warn should be quiet about the fixed HDU and only warn # about the unfixable one hdu = make_invalid_hdu() with catch_warnings() as w: hdu.verify('silentfix+warn') assert len(w) == 4 assert 'Illegal keyword name' in str(w[2].message) # silentfix+exception should only mention the unfixable error in the # exception hdu = make_invalid_hdu() try: hdu.verify('silentfix+exception') except fits.VerifyError as exc: assert 'Illegal keyword name' in str(exc) assert 'not upper case' not in str(exc) else: self.fail('An exception should have been raised.') # fix+ignore is not too useful, but it should warn about the fixed # problems while saying nothing about the unfixable problems hdu = make_invalid_hdu() with catch_warnings() as w: hdu.verify('fix+ignore') assert len(w) == 4 assert 'not upper case' in str(w[2].message) # fix+warn hdu = make_invalid_hdu() with catch_warnings() as w: hdu.verify('fix+warn') assert len(w) == 6 assert 'not upper case' in str(w[2].message) assert 'Illegal keyword name' in str(w[4].message) # fix+exception hdu = make_invalid_hdu() try: hdu.verify('fix+exception') except fits.VerifyError as exc: assert 'Illegal keyword name' in str(exc) assert 'not upper case' in str(exc) else: self.fail('An exception should have been raised.') def test_getext(self): """ Test the various different ways of specifying an extension header in the convenience functions. 
""" hl, ext = _getext(self.data('test0.fits'), 'readonly', 1) assert ext == 1 pytest.raises(ValueError, _getext, self.data('test0.fits'), 'readonly', 1, 2) pytest.raises(ValueError, _getext, self.data('test0.fits'), 'readonly', (1, 2)) pytest.raises(ValueError, _getext, self.data('test0.fits'), 'readonly', 'sci', 'sci') pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly', 1, 2, 3) hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=1) assert ext == 1 hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=('sci', 2)) assert ext == ('sci', 2) pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly', 1, ext=('sci', 2), extver=3) pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly', ext=('sci', 2), extver=3) hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci') assert ext == ('sci', 1) hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci', 1) assert ext == ('sci', 1) hl, ext = _getext(self.data('test0.fits'), 'readonly', ('sci', 1)) assert ext == ('sci', 1) hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci', extver=1, do_not_scale_image_data=True) assert ext == ('sci', 1) pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly', 'sci', ext=1) pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly', 'sci', 1, extver=2) hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci') assert ext == ('sci', 1) hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci', extver=1) assert ext == ('sci', 1) pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly', extver=1) def test_extension_name_case_sensitive(self): """ Tests that setting fits.conf.extension_name_case_sensitive at runtime works. """ hdu = fits.ImageHDU() hdu.name = 'sCi' assert hdu.name == 'SCI' assert hdu.header['EXTNAME'] == 'SCI' with fits.conf.set_temp('extension_name_case_sensitive', True): hdu = fits.ImageHDU() hdu.name = 'sCi' assert hdu.name == 'sCi' assert hdu.header['EXTNAME'] == 'sCi' hdu.name = 'sCi' assert hdu.name == 'SCI' assert hdu.header['EXTNAME'] == 'SCI' def test_hdu_fromstring(self): """ Tests creating a fully-formed HDU object from a string containing the bytes of the HDU. """ dat = open(self.data('test0.fits'), 'rb').read() offset = 0 with fits.open(self.data('test0.fits')) as hdul: hdulen = hdul[0]._data_offset + hdul[0]._data_size hdu = fits.PrimaryHDU.fromstring(dat[:hdulen]) assert isinstance(hdu, fits.PrimaryHDU) assert hdul[0].header == hdu.header assert hdu.data is None hdu.header['TEST'] = 'TEST' hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert isinstance(hdu, fits.PrimaryHDU) assert hdul[0].header[:-1] == hdu.header[:-1] assert hdul[0].header['TEST'] == 'TEST' assert hdu.data is None with fits.open(self.data('test0.fits'))as hdul: for ext_hdu in hdul[1:]: offset += hdulen hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen]) assert isinstance(hdu, fits.ImageHDU) assert ext_hdu.header == hdu.header assert (ext_hdu.data == hdu.data).all() def test_nonstandard_hdu(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157 Tests that "Nonstandard" HDUs with SIMPLE = F are read and written without prepending a superfluous and unwanted standard primary HDU. 
""" data = np.arange(100, dtype=np.uint8) hdu = fits.PrimaryHDU(data=data) hdu.header['SIMPLE'] = False hdu.writeto(self.temp('test.fits')) info = [(0, '', 1, 'NonstandardHDU', 5, (), '', '')] with fits.open(self.temp('test.fits')) as hdul: assert hdul.info(output=False) == info # NonstandardHDUs just treat the data as an unspecified array of # bytes. The first 100 bytes should match the original data we # passed in...the rest should be zeros padding out the rest of the # FITS block assert (hdul[0].data[:100] == data).all() assert (hdul[0].data[100:] == 0).all() def test_extname(self): """Test getting/setting the EXTNAME of an HDU.""" h1 = fits.PrimaryHDU() assert h1.name == 'PRIMARY' # Normally a PRIMARY HDU should not have an EXTNAME, though it should # have a default .name attribute assert 'EXTNAME' not in h1.header # The current version of the FITS standard does allow PRIMARY HDUs to # have an EXTNAME, however. h1.name = 'NOTREAL' assert h1.name == 'NOTREAL' assert h1.header.get('EXTNAME') == 'NOTREAL' # Updating the EXTNAME in the header should update the .name h1.header['EXTNAME'] = 'TOOREAL' assert h1.name == 'TOOREAL' # If we delete an EXTNAME keyword from a PRIMARY HDU it should go back # to the default del h1.header['EXTNAME'] assert h1.name == 'PRIMARY' # For extension HDUs the situation is a bit simpler: h2 = fits.ImageHDU() assert h2.name == '' assert 'EXTNAME' not in h2.header h2.name = 'HELLO' assert h2.name == 'HELLO' assert h2.header.get('EXTNAME') == 'HELLO' h2.header['EXTNAME'] = 'GOODBYE' assert h2.name == 'GOODBYE' def test_extver_extlevel(self): """Test getting/setting the EXTVER and EXTLEVEL of and HDU.""" # EXTVER and EXTNAME work exactly the same; their semantics are, for # now, to be inferred by the user. Although they should never be less # than 1, the standard does not explicitly forbid any value so long as # it's an integer h1 = fits.PrimaryHDU() assert h1.ver == 1 assert h1.level == 1 assert 'EXTVER' not in h1.header assert 'EXTLEVEL' not in h1.header h1.ver = 2 assert h1.header.get('EXTVER') == 2 h1.header['EXTVER'] = 3 assert h1.ver == 3 del h1.header['EXTVER'] h1.ver == 1 h1.level = 2 assert h1.header.get('EXTLEVEL') == 2 h1.header['EXTLEVEL'] = 3 assert h1.level == 3 del h1.header['EXTLEVEL'] assert h1.level == 1 pytest.raises(TypeError, setattr, h1, 'ver', 'FOO') pytest.raises(TypeError, setattr, h1, 'level', 'BAR') def test_consecutive_writeto(self): """ Regression test for an issue where calling writeto twice on the same HDUList could write a corrupted file. https://github.com/spacetelescope/PyFITS/issues/40 is actually a particular instance of this problem, though isn't unique to sys.stdout. 
""" with fits.open(self.data('test0.fits')) as hdul1: # Add a bunch of header keywords so that the data will be forced to # new offsets within the file: for idx in range(40): hdul1[1].header['TEST{}'.format(idx)] = 'test' hdul1.writeto(self.temp('test1.fits')) hdul1.writeto(self.temp('test2.fits')) # Open a second handle to the original file and compare it to hdul1 # (We only compare part of the one header that was modified) # Compare also with the second writeto output with fits.open(self.data('test0.fits')) as hdul2: with fits.open(self.temp('test2.fits')) as hdul3: for hdul in (hdul1, hdul3): for idx, hdus in enumerate(zip(hdul2, hdul)): hdu2, hdu = hdus if idx != 1: assert hdu.header == hdu2.header else: assert (hdu2.header == hdu.header[:len(hdu2.header)]) assert np.all(hdu.data == hdu2.data) class TestConvenienceFunctions(FitsTestCase): def test_writeto(self): """ Simple test for writing a trivial header and some data to a file with the `writeto()` convenience function. """ data = np.zeros((100, 100)) header = fits.Header() fits.writeto(self.temp('array.fits'), data, header=header, overwrite=True) hdul = fits.open(self.temp('array.fits')) assert len(hdul) == 1 assert (data == hdul[0].data).all() def test_writeto_2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107 Test of `writeto()` with a trivial header containing a single keyword. """ data = np.zeros((100, 100)) header = fits.Header() header.set('CRPIX1', 1.) fits.writeto(self.temp('array.fits'), data, header=header, overwrite=True, output_verify='silentfix') hdul = fits.open(self.temp('array.fits')) assert len(hdul) == 1 assert (data == hdul[0].data).all() assert 'CRPIX1' in hdul[0].header assert hdul[0].header['CRPIX1'] == 1.0 class TestFileFunctions(FitsTestCase): """ Tests various basic I/O operations, specifically in the astropy.io.fits.file._File class. """ def test_open_nonexistent(self): """Test that trying to open a non-existent file results in an OSError (and not some other arbitrary exception). 
""" try: fits.open(self.temp('foobar.fits')) except OSError as e: assert 'No such file or directory' in str(e) # But opening in ostream or append mode should be okay, since they # allow writing new files for mode in ('ostream', 'append'): with fits.open(self.temp('foobar.fits'), mode=mode) as h: pass assert os.path.exists(self.temp('foobar.fits')) os.remove(self.temp('foobar.fits')) def test_open_file_handle(self): # Make sure we can open a FITS file from an open file handle with open(self.data('test0.fits'), 'rb') as handle: with fits.open(handle) as fitsfile: pass with open(self.temp('temp.fits'), 'wb') as handle: with fits.open(handle, mode='ostream') as fitsfile: pass # Opening without explicitly specifying binary mode should fail with pytest.raises(ValueError): with open(self.data('test0.fits')) as handle: with fits.open(handle) as fitsfile: pass # All of these read modes should fail for mode in ['r', 'rt', 'r+', 'rt+', 'a', 'at', 'a+', 'at+']: with pytest.raises(ValueError): with open(self.data('test0.fits'), mode=mode) as handle: with fits.open(handle) as fitsfile: pass # These write modes should fail as well for mode in ['w', 'wt', 'w+', 'wt+']: with pytest.raises(ValueError): with open(self.temp('temp.fits'), mode=mode) as handle: with fits.open(handle) as fitsfile: pass def test_fits_file_handle_mode_combo(self): # This should work fine since no mode is given with open(self.data('test0.fits'), 'rb') as handle: with fits.open(handle) as fitsfile: pass # This should work fine since the modes are compatible with open(self.data('test0.fits'), 'rb') as handle: with fits.open(handle, mode='readonly') as fitsfile: pass # This should not work since the modes conflict with pytest.raises(ValueError): with open(self.data('test0.fits'), 'rb') as handle: with fits.open(handle, mode='ostream') as fitsfile: pass def test_open_from_url(self): import urllib.request file_url = "file:///" + self.data('test0.fits') with urllib.request.urlopen(file_url) as urlobj: with fits.open(urlobj) as fits_handle: pass # It will not be possible to write to a file that is from a URL object for mode in ('ostream', 'append', 'update'): with pytest.raises(ValueError): with urllib.request.urlopen(file_url) as urlobj: with fits.open(urlobj, mode=mode) as fits_handle: pass @pytest.mark.remote_data(source='astropy') def test_open_from_remote_url(self): import urllib.request for dataurl in (conf.dataurl, conf.dataurl_mirror): remote_url = '{}/{}'.format(dataurl, 'allsky/allsky_rosat.fits') try: with urllib.request.urlopen(remote_url) as urlobj: with fits.open(urlobj) as fits_handle: assert len(fits_handle) == 1 for mode in ('ostream', 'append', 'update'): with pytest.raises(ValueError): with urllib.request.urlopen(remote_url) as urlobj: with fits.open(urlobj, mode=mode) as fits_handle: assert len(fits_handle) == 1 except (urllib.error.HTTPError, urllib.error.URLError): continue else: break else: raise Exception("Could not download file") def test_open_gzipped(self): gzip_file = self._make_gzip_file() with ignore_warnings(): with fits.open(gzip_file) as fits_handle: assert fits_handle._file.compression == 'gzip' assert len(fits_handle) == 5 with fits.open(gzip.GzipFile(gzip_file)) as fits_handle: assert fits_handle._file.compression == 'gzip' assert len(fits_handle) == 5 def test_open_gzipped_from_handle(self): with open(self._make_gzip_file(), 'rb') as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == 'gzip' def test_detect_gzipped(self): """Test detection of a gzip file when the 
extension is not .gz.""" with ignore_warnings(): with fits.open(self._make_gzip_file('test0.fz')) as fits_handle: assert fits_handle._file.compression == 'gzip' assert len(fits_handle) == 5 def test_writeto_append_mode_gzip(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/33 Check that a new GzipFile opened in append mode can be used to write out a new FITS file. """ # Note: when opening a GzipFile the 'b+' is superfluous, but this was # still how the original test case looked # Note: with statement not supported on GzipFile in older Python # versions fileobj = gzip.GzipFile(self.temp('test.fits.gz'), 'ab+') h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp('test.fits.gz')) as hdul: assert hdul[0].header == h.header def test_open_bzipped(self): bzip_file = self._make_bzip2_file() with ignore_warnings(): with fits.open(bzip_file) as fits_handle: assert fits_handle._file.compression == 'bzip2' assert len(fits_handle) == 5 with fits.open(bz2.BZ2File(bzip_file)) as fits_handle: assert fits_handle._file.compression == 'bzip2' assert len(fits_handle) == 5 def test_open_bzipped_from_handle(self): with open(self._make_bzip2_file(), 'rb') as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == 'bzip2' assert len(fits_handle) == 5 def test_detect_bzipped(self): """Test detection of a bzip2 file when the extension is not .bz2.""" with ignore_warnings(): with fits.open(self._make_bzip2_file('test0.xx')) as fits_handle: assert fits_handle._file.compression == 'bzip2' assert len(fits_handle) == 5 def test_writeto_bzip2_fileobj(self): """Test writing to a bz2.BZ2File file like object""" fileobj = bz2.BZ2File(self.temp('test.fits.bz2'), 'w') h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp('test.fits.bz2')) as hdul: assert hdul[0].header == h.header def test_writeto_bzip2_filename(self): """Test writing to a bzip2 file by name""" filename = self.temp('testname.fits.bz2') h = fits.PrimaryHDU() h.writeto(filename) with fits.open(self.temp('testname.fits.bz2')) as hdul: assert hdul[0].header == h.header def test_open_zipped(self): zip_file = self._make_zip_file() with ignore_warnings(): with fits.open(zip_file) as fits_handle: assert fits_handle._file.compression == 'zip' assert len(fits_handle) == 5 with fits.open(zipfile.ZipFile(zip_file)) as fits_handle: assert fits_handle._file.compression == 'zip' assert len(fits_handle) == 5 def test_open_zipped_from_handle(self): with open(self._make_zip_file(), 'rb') as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == 'zip' assert len(fits_handle) == 5 def test_detect_zipped(self): """Test detection of a zip file when the extension is not .zip.""" zf = self._make_zip_file(filename='test0.fz') with ignore_warnings(): assert len(fits.open(zf)) == 5 def test_open_zipped_writeable(self): """Opening zipped files in a writeable mode should fail.""" zf = self._make_zip_file() pytest.raises(OSError, fits.open, zf, 'update') pytest.raises(OSError, fits.open, zf, 'append') zf = zipfile.ZipFile(zf, 'a') pytest.raises(OSError, fits.open, zf, 'update') pytest.raises(OSError, fits.open, zf, 'append') def test_read_open_astropy_gzip_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2774 This tests reading from a ``GzipFile`` object from Astropy's compatibility copy of the ``gzip`` module. 
""" gf = gzip.GzipFile(self._make_gzip_file()) try: assert len(fits.open(gf)) == 5 finally: gf.close() @raises(OSError) def test_open_multiple_member_zipfile(self): """ Opening zip files containing more than one member files should fail as there's no obvious way to specify which file is the FITS file to read. """ zfile = zipfile.ZipFile(self.temp('test0.zip'), 'w') zfile.write(self.data('test0.fits')) zfile.writestr('foo', 'bar') zfile.close() fits.open(zfile.filename) def test_read_open_file(self): """Read from an existing file object.""" with open(self.data('test0.fits'), 'rb') as f: assert len(fits.open(f)) == 5 def test_read_closed_file(self): """Read from an existing file object that's been closed.""" f = open(self.data('test0.fits'), 'rb') f.close() assert len(fits.open(f)) == 5 def test_read_open_gzip_file(self): """Read from an open gzip file object.""" gf = gzip.GzipFile(self._make_gzip_file()) try: assert len(fits.open(gf)) == 5 finally: gf.close() def test_open_gzip_file_for_writing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195.""" gf = self._make_gzip_file() with fits.open(gf, mode='update') as h: h[0].header['EXPFLAG'] = 'ABNORMAL' h[1].data[0, 0] = 1 with fits.open(gf) as h: # Just to make sur ethe update worked; if updates work # normal writes should work too... assert h[0].header['EXPFLAG'] == 'ABNORMAL' assert h[1].data[0, 0] == 1 def test_write_read_gzip_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2794 Ensure files written through gzip are readable. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) hdu.writeto(self.temp('test.fits.gz')) with open(self.temp('test.fits.gz'), 'rb') as f: assert f.read(3) == GZIP_MAGIC with fits.open(self.temp('test.fits.gz')) as hdul: assert np.all(hdul[0].data == data) def test_read_file_like_object(self): """Test reading a FITS file from a file-like object.""" filelike = io.BytesIO() with open(self.data('test0.fits'), 'rb') as f: filelike.write(f.read()) filelike.seek(0) with ignore_warnings(): assert len(fits.open(filelike)) == 5 def test_updated_file_permissions(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79 Tests that when a FITS file is modified in update mode, the file permissions are preserved. """ filename = self.temp('test.fits') hdul = [fits.PrimaryHDU(), fits.ImageHDU()] hdul = fits.HDUList(hdul) hdul.writeto(filename) old_mode = os.stat(filename).st_mode hdul = fits.open(filename, mode='update') hdul.insert(1, fits.ImageHDU()) hdul.flush() hdul.close() assert old_mode == os.stat(filename).st_mode def test_fileobj_mode_guessing(self): """Tests whether a file opened without a specified io.fits mode ('readonly', etc.) is opened in a mode appropriate for the given file object. 
""" self.copy_file('test0.fits') # Opening in text mode should outright fail for mode in ('r', 'w', 'a'): with open(self.temp('test0.fits'), mode) as f: pytest.raises(ValueError, fits.HDUList.fromfile, f) # Need to re-copy the file since opening it in 'w' mode blew it away self.copy_file('test0.fits') with open(self.temp('test0.fits'), 'rb') as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)['filemode'] == 'readonly' for mode in ('wb', 'ab'): with open(self.temp('test0.fits'), mode) as f: with fits.HDUList.fromfile(f) as h: # Basically opening empty files for output streaming assert len(h) == 0 # Need to re-copy the file since opening it in 'w' mode blew it away self.copy_file('test0.fits') with open(self.temp('test0.fits'), 'wb+') as f: with fits.HDUList.fromfile(f) as h: # wb+ still causes an existing file to be overwritten so there # are no HDUs assert len(h) == 0 # Need to re-copy the file since opening it in 'w' mode blew it away self.copy_file('test0.fits') with open(self.temp('test0.fits'), 'rb+') as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)['filemode'] == 'update' with open(self.temp('test0.fits'), 'ab+') as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)['filemode'] == 'append' def test_mmap_unwriteable(self): """Regression test for https://github.com/astropy/astropy/issues/968 Temporarily patches mmap.mmap to exhibit platform-specific bad behavior. """ class MockMmap(mmap.mmap): def flush(self): raise OSError('flush is broken on this platform') old_mmap = mmap.mmap mmap.mmap = MockMmap # Force the mmap test to be rerun _File.__dict__['_mmap_available']._cache.clear() try: self.copy_file('test0.fits') with catch_warnings() as w: with fits.open(self.temp('test0.fits'), mode='update', memmap=True) as h: h[1].data[0, 0] = 999 assert len(w) == 1 assert 'mmap.flush is unavailable' in str(w[0].message) # Double check that writing without mmap still worked with fits.open(self.temp('test0.fits')) as h: assert h[1].data[0, 0] == 999 finally: mmap.mmap = old_mmap _File.__dict__['_mmap_available']._cache.clear() def test_mmap_closing(self): """ Tests that the mmap reference is closed/removed when there aren't any HDU data references left. """ if not _File._mmap_available: pytest.xfail('not expected to work on platforms without mmap ' 'support') with fits.open(self.data('test0.fits'), memmap=True) as hdul: assert hdul._file._mmap is None hdul[1].data assert hdul._file._mmap is not None del hdul[1].data # Should be no more references to data in the file so close the # mmap assert hdul._file._mmap is None hdul[1].data hdul[2].data del hdul[1].data # hdul[2].data is still references so keep the mmap open assert hdul._file._mmap is not None del hdul[2].data assert hdul._file._mmap is None assert hdul._file._mmap is None with fits.open(self.data('test0.fits'), memmap=True) as hdul: hdul[1].data # When the only reference to the data is on the hdu object, and the # hdulist it belongs to has been closed, the mmap should be closed as # well assert hdul._file._mmap is None with fits.open(self.data('test0.fits'), memmap=True) as hdul: data = hdul[1].data # also make a copy data_copy = data.copy() # The HDUList is closed; in fact, get rid of it completely del hdul # The data array should still work though... 
assert np.all(data == data_copy) def test_uncloseable_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2356 Demonstrates that FITS files can still be read from "file-like" objects that don't have an obvious "open" or "closed" state. """ class MyFileLike: def __init__(self, foobar): self._foobar = foobar def read(self, n): return self._foobar.read(n) def seek(self, offset, whence=os.SEEK_SET): self._foobar.seek(offset, whence) def tell(self): return self._foobar.tell() with open(self.data('test0.fits'), 'rb') as f: fileobj = MyFileLike(f) with fits.open(fileobj) as hdul1: with fits.open(self.data('test0.fits')) as hdul2: assert hdul1.info(output=False) == hdul2.info(output=False) for hdu1, hdu2 in zip(hdul1, hdul2): assert hdu1.header == hdu2.header if hdu1.data is not None and hdu2.data is not None: assert np.all(hdu1.data == hdu2.data) def test_write_bytesio_discontiguous(self): """ Regression test related to https://github.com/astropy/astropy/issues/2794#issuecomment-55441539 Demonstrates that writing an HDU containing a discontiguous Numpy array should work properly. """ data = np.arange(100)[::3] hdu = fits.PrimaryHDU(data=data) fileobj = io.BytesIO() hdu.writeto(fileobj) fileobj.seek(0) with fits.open(fileobj) as h: assert np.all(h[0].data == data) def test_write_bytesio(self): """ Regression test for https://github.com/astropy/astropy/issues/2463 Test againt `io.BytesIO`. `io.StringIO` is not supported. """ self._test_write_string_bytes_io(io.BytesIO()) @pytest.mark.skipif(str('sys.platform.startswith("win32")')) def test_filename_with_colon(self): """ Test reading and writing a file with a colon in the filename. Regression test for https://github.com/astropy/astropy/issues/3122 """ # Skip on Windows since colons in filenames makes NTFS sad. filename = 'APEXHET.2014-04-01T15:18:01.000.fits' hdu = fits.PrimaryHDU(data=np.arange(10)) hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename)) as hdul: assert np.all(hdul[0].data == hdu.data) def test_writeto_full_disk(self, monkeypatch): """ Test that it gives a readable error when trying to write an hdulist to a full disk. """ def _writeto(self, array): raise OSError("Fake error raised when writing file.") def get_free_space_in_dir(path): return 0 with pytest.raises(OSError) as exc: monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto) monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir) n = np.arange(0, 1000, dtype='int64') hdu = fits.PrimaryHDU(n) hdulist = fits.HDUList(hdu) filename = self.temp('test.fits') with open(filename, mode='wb') as fileobj: hdulist.writeto(fileobj) assert ("Not enough space on disk: requested 8000, available 0. " "Fake error raised when writing file.") == exc.value.args[0] def test_flush_full_disk(self, monkeypatch): """ Test that it gives a readable error when trying to update an hdulist to a full disk. 
""" filename = self.temp('test.fits') hdul = [fits.PrimaryHDU(), fits.ImageHDU()] hdul = fits.HDUList(hdul) hdul[0].data = np.arange(0, 1000, dtype='int64') hdul.writeto(filename) def _writedata(self, fileobj): raise OSError("Fake error raised when writing file.") def get_free_space_in_dir(path): return 0 monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata) monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir) with pytest.raises(OSError) as exc: with fits.open(filename, mode='update') as hdul: hdul[0].data = np.arange(0, 1000, dtype='int64') hdul.insert(1, fits.ImageHDU()) hdul.flush() assert ("Not enough space on disk: requested 8000, available 0. " "Fake error raised when writing file.") == exc.value.args[0] def _test_write_string_bytes_io(self, fileobj): """ Implemented for both test_write_stringio and test_write_bytesio. """ with fits.open(self.data('test0.fits')) as hdul: hdul.writeto(fileobj) hdul2 = fits.HDUList.fromstring(fileobj.getvalue()) assert FITSDiff(hdul, hdul2).identical def _make_gzip_file(self, filename='test0.fits.gz'): gzfile = self.temp(filename) with open(self.data('test0.fits'), 'rb') as f: gz = gzip.open(gzfile, 'wb') gz.write(f.read()) gz.close() return gzfile def _make_zip_file(self, mode='copyonwrite', filename='test0.fits.zip'): zfile = zipfile.ZipFile(self.temp(filename), 'w') zfile.write(self.data('test0.fits')) zfile.close() return zfile.filename def _make_bzip2_file(self, filename='test0.fits.bz2'): bzfile = self.temp(filename) with open(self.data('test0.fits'), 'rb') as f: bz = bz2.BZ2File(bzfile, 'w') bz.write(f.read()) bz.close() return bzfile class TestStreamingFunctions(FitsTestCase): """Test functionality of the StreamingHDU class.""" def test_streaming_hdu(self): shdu = self._make_streaming_hdu(self.temp('new.fits')) assert isinstance(shdu.size, int) assert shdu.size == 100 @raises(ValueError) def test_streaming_hdu_file_wrong_mode(self): """ Test that streaming an HDU to a file opened in the wrong mode fails as expected. 
""" with open(self.temp('new.fits'), 'wb') as f: header = fits.Header() fits.StreamingHDU(f, header) def test_streaming_hdu_write_file(self): """Test streaming an HDU to an open file object.""" arr = np.zeros((5, 5), dtype=np.int32) with open(self.temp('new.fits'), 'ab+') as f: shdu = self._make_streaming_hdu(f) shdu.write(arr) assert shdu.writecomplete assert shdu.size == 100 hdul = fits.open(self.temp('new.fits')) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_streaming_hdu_write_file_like(self): """Test streaming an HDU to an open file-like object.""" arr = np.zeros((5, 5), dtype=np.int32) # The file-like object underlying a StreamingHDU must be in binary mode sf = io.BytesIO() shdu = self._make_streaming_hdu(sf) shdu.write(arr) assert shdu.writecomplete assert shdu.size == 100 sf.seek(0) hdul = fits.open(sf) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_streaming_hdu_append_extension(self): arr = np.zeros((5, 5), dtype=np.int32) with open(self.temp('new.fits'), 'ab+') as f: shdu = self._make_streaming_hdu(f) shdu.write(arr) # Doing this again should update the file with an extension with open(self.temp('new.fits'), 'ab+') as f: shdu = self._make_streaming_hdu(f) shdu.write(arr) def test_fix_invalid_extname(self, capsys): phdu = fits.PrimaryHDU() ihdu = fits.ImageHDU() ihdu.header['EXTNAME'] = 12345678 hdul = fits.HDUList([phdu, ihdu]) pytest.raises(fits.VerifyError, hdul.writeto, self.temp('temp.fits'), output_verify='exception') hdul.writeto(self.temp('temp.fits'), output_verify='fix') with fits.open(self.temp('temp.fits')): assert hdul[1].name == '12345678' assert hdul[1].header['EXTNAME'] == '12345678' def _make_streaming_hdu(self, fileobj): hd = fits.Header() hd['SIMPLE'] = (True, 'conforms to FITS standard') hd['BITPIX'] = (32, 'array data type') hd['NAXIS'] = (2, 'number of array dimensions') hd['NAXIS1'] = 5 hd['NAXIS2'] = 5 hd['EXTEND'] = True return fits.StreamingHDU(fileobj, hd) def test_blank_ignore(self): with fits.open(self.data('blank.fits'), ignore_blank=True) as f: assert f[0].data.flat[0] == 2 def test_error_if_memmap_impossible(self): pth = self.data('blank.fits') with pytest.raises(ValueError): fits.open(pth, memmap=True)[0].data # However, it should not fail if do_not_scale_image_data was used: # See https://github.com/astropy/astropy/issues/3766 hdul = fits.open(pth, memmap=True, do_not_scale_image_data=True) hdul[0].data # Just make sure it doesn't crash
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import pytest import numpy as np from . import FitsTestCase from ..fitstime import GLOBAL_TIME_INFO, time_to_fits, is_time_column_keyword from ....coordinates import EarthLocation from ....io import fits from ....table import Table, QTable from ....time import Time, TimeDelta from ....time.core import BARYCENTRIC_SCALES from ....time.formats import FITS_DEPRECATED_SCALES from ....tests.helper import catch_warnings class TestFitsTime(FitsTestCase): def setup_class(self): self.time = np.array(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00']) def test_is_time_column_keyword(self): # Time column keyword without column number assert is_time_column_keyword('TRPOS') is False # Global time column keyword assert is_time_column_keyword('TIMESYS') is False # Valid time column keyword assert is_time_column_keyword('TRPOS12') is True @pytest.mark.parametrize('table_types', (Table, QTable)) def test_time_to_fits_loc(self, table_types): """ Test all the unusual conditions for locations of ``Time`` columns in a ``Table``. """ t = table_types() t['a'] = Time(self.time, format='isot', scale='utc') t['b'] = Time(self.time, format='isot', scale='tt') # Check that vectorized location raises an exception t['a'].location = EarthLocation([1,2], [2,3], [3,4]) with pytest.raises(ValueError) as err: table, hdr = time_to_fits(t) assert 'Vectorized Location of Time Column' in str(err.value) # Check that multiple Time columns with different locations raise an exception t['a'].location = EarthLocation(1, 2, 3) t['b'].location = EarthLocation(2, 3, 4) with pytest.raises(ValueError) as err: table, hdr = time_to_fits(t) assert 'Multiple Time Columns with different geocentric' in str(err.value) # Check that Time column with no location specified will assume global location t['b'].location = None with catch_warnings() as w: table, hdr = time_to_fits(t) assert len(w) == 1 assert str(w[0].message).startswith('Time Column "b" has no specified ' 'location, but global Time Position ' 'is present') # Check that multiple Time columns with same location can be written t['b'].location = EarthLocation(1, 2, 3) with catch_warnings() as w: table, hdr = time_to_fits(t) assert len(w) == 0 # Check compatibility of Time Scales and Reference Positions for scale in BARYCENTRIC_SCALES: t.replace_column('a', getattr(t['a'], scale)) with catch_warnings() as w: table, hdr = time_to_fits(t) assert len(w) == 1 assert str(w[0].message).startswith('Earth Location "TOPOCENTER" ' 'for Time Column') @pytest.mark.parametrize('table_types', (Table, QTable)) def test_time_to_fits_header(self, table_types): """ Test the header and metadata returned by ``time_to_fits``. 
""" t = table_types() t['a'] = Time(self.time, format='isot', scale='utc', location=EarthLocation(-2446354, 4237210, 4077985, unit='m')) t['b'] = Time([1,2], format='cxcsec', scale='tt') ideal_col_hdr = {'OBSGEO-X' : t['a'].location.x.value, 'OBSGEO-Y' : t['a'].location.y.value, 'OBSGEO-Z' : t['a'].location.z.value} table, hdr = time_to_fits(t) # Check the global time keywords in hdr for key, value in GLOBAL_TIME_INFO.items(): assert hdr[key] == value[0] assert hdr.comments[key] == value[1] hdr.remove(key) for key, value in ideal_col_hdr.items(): assert hdr[key] == value hdr.remove(key) # Check the column-specific time metadata coord_info = table.meta['__coordinate_columns__'] for colname in coord_info: assert coord_info[colname]['coord_type'] == t[colname].scale.upper() assert coord_info[colname]['coord_unit'] == 'd' assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER' assert len(hdr) == 0 @pytest.mark.parametrize('table_types', (Table, QTable)) def test_fits_to_time_meta(self, table_types): """ Test that the relevant global time metadata is read into ``Table.meta`` as ``Time``. """ t = table_types() t['a'] = Time(self.time, format='isot', scale='utc') t.meta['DATE'] = '1999-01-01T00:00:00' t.meta['MJD-OBS'] = 56670 # Test for default write behaviour (full precision) and read it # back using native astropy objects; thus, ensure its round-trip t.write(self.temp('time.fits'), format='fits', overwrite=True) tm = table_types.read(self.temp('time.fits'), format='fits', astropy_native=True) # Test DATE assert isinstance(tm.meta['DATE'], Time) assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)' assert tm.meta['DATE'].format == 'fits' # Default time scale according to the FITS standard is UTC assert tm.meta['DATE'].scale == 'utc' # Test MJD-xxx assert isinstance(tm.meta['MJD-OBS'], Time) assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS'] assert tm.meta['MJD-OBS'].format == 'mjd' assert tm.meta['MJD-OBS'].scale == 'utc' # Explicitly specified Time Scale t.meta['TIMESYS'] = 'ET' t.write(self.temp('time.fits'), format='fits', overwrite=True) tm = table_types.read(self.temp('time.fits'), format='fits', astropy_native=True) # Test DATE assert isinstance(tm.meta['DATE'], Time) assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)' assert tm.meta['DATE'].scale == 'utc' # Test MJD-xxx assert isinstance(tm.meta['MJD-OBS'], Time) assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS'] assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']] # Test for conversion of time data to its value, as defined by its format t['a'].info.serialize_method['fits'] = 'formatted_value' t.write(self.temp('time.fits'), format='fits', overwrite=True) tm = table_types.read(self.temp('time.fits'), format='fits') # Test DATE assert not isinstance(tm.meta['DATE'], Time) assert tm.meta['DATE'] == t.meta['DATE'] # Test MJD-xxx assert not isinstance(tm.meta['MJD-OBS'], Time) assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS'] assert (tm['a'] == t['a'].value).all() @pytest.mark.parametrize('table_types', (Table, QTable)) def test_time_loc_unit(self, table_types): """ Test that ``location`` specified by using any valid unit (length/angle) in ``Time`` columns gets stored in FITS as ITRS Cartesian coordinates (X, Y, Z), each in m. Test that it round-trips through FITS. 
""" t = table_types() t['a'] = Time(self.time, format='isot', scale='utc', location=EarthLocation(1,2,3, unit='km')) table, hdr = time_to_fits(t) # Check the header hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m') hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m') hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m') t.write(self.temp('time.fits'), format='fits', overwrite=True) tm = table_types.read(self.temp('time.fits'), format='fits', astropy_native=True) # Check the round-trip of location tm['a'].location == t['a'].location tm['a'].location.x.value == t['a'].location.x.to_value(unit='m') tm['a'].location.y.value == t['a'].location.y.to_value(unit='m') tm['a'].location.z.value == t['a'].location.z.to_value(unit='m') @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_read_fits(self, table_types): """ Test that FITS table with time columns (standard compliant) can be read by io.fits as a table with Time columns. This tests the following: 1. The special-case where a column has the name 'TIME' and a time unit 2. Time from Epoch (Reference time) is appropriately converted. 3. Coordinate columns (corresponding to coordinate keywords in the header) other than time, that is, spatial coordinates, are not mistaken to be time. """ filename = self.data('chandra_time.fits') tm = table_types.read(filename, astropy_native=True) # Test case 1 assert isinstance(tm['time'], Time) assert tm['time'].scale == 'tt' assert tm['time'].format == 'mjd' non_native = table_types.read(filename) # Test case 2 ref_time = Time(non_native.meta['MJDREF'], format='mjd', scale=non_native.meta['TIMESYS'].lower()) delta_time = TimeDelta(non_native['time']) assert (ref_time + delta_time == tm['time']).all() # Test case 3 for colname in ['chipx', 'chipy', 'detx', 'dety', 'x', 'y']: assert not isinstance(tm[colname], Time) @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_read_fits_datetime(self, table_types): """ Test that ISO-8601 Datetime String Columns are read correctly. """ # Datetime column c = fits.Column(name='datetime', format='A29', coord_type='TCG', time_ref_pos='GEOCENTER', array=self.time) # Explicitly create a FITS Binary Table bhdu = fits.BinTableHDU.from_columns([c]) bhdu.writeto(self.temp('time.fits'), overwrite=True) tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert isinstance(tm['datetime'], Time) assert tm['datetime'].scale == 'tcg' assert tm['datetime'].format == 'fits' assert (tm['datetime'] == self.time).all() @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_read_fits_location(self, table_types): """ Test that geocentric/geodetic observatory position is read properly, as and when it is specified. 
""" # Datetime column c = fits.Column(name='datetime', format='A29', coord_type='TT', time_ref_pos='TOPOCENTER', array=self.time) # Observatory position in ITRS Cartesian coordinates (geocentric) cards = [('OBSGEO-X', -2446354), ('OBSGEO-Y', 4237210), ('OBSGEO-Z', 4077985)] # Explicitly create a FITS Binary Table bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards)) bhdu.writeto(self.temp('time.fits'), overwrite=True) tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert isinstance(tm['datetime'], Time) assert tm['datetime'].location.x.value == -2446354 assert tm['datetime'].location.y.value == 4237210 assert tm['datetime'].location.z.value == 4077985 # Observatory position in geodetic coordinates cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)] # Explicitly create a FITS Binary Table bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards)) bhdu.writeto(self.temp('time.fits'), overwrite=True) tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert isinstance(tm['datetime'], Time) assert tm['datetime'].location.lon.value == 0 assert tm['datetime'].location.lat.value == 0 assert tm['datetime'].location.height.value == 0 @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_read_fits_scale(self, table_types): """ Test handling of 'GPS' and 'LOCAL' time scales which are recognized by the FITS standard but are not native to astropy. """ # GPS scale column gps_time = np.array([630720013, 630720014]) c = fits.Column(name='gps_time', format='D', unit='s', coord_type='GPS', coord_unit='s', time_ref_pos='TOPOCENTER', array=gps_time) cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)] bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards)) bhdu.writeto(self.temp('time.fits'), overwrite=True) with catch_warnings() as w: tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert len(w) == 1 assert 'FITS recognized time scale value "GPS"' in str(w[0].message) assert isinstance(tm['gps_time'], Time) assert tm['gps_time'].format == 'gps' assert tm['gps_time'].scale == 'tai' assert (tm['gps_time'].value == gps_time).all() # LOCAL scale column local_time = np.array([1, 2]) c = fits.Column(name='local_time', format='D', unit='d', coord_type='LOCAL', coord_unit='d', time_ref_pos='RELOCATABLE', array=local_time) bhdu = fits.BinTableHDU.from_columns([c]) bhdu.writeto(self.temp('time.fits'), overwrite=True) with catch_warnings() as w: tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert len(w) == 1 assert 'FITS recognized time scale value "LOCAL"' in str(w[0].message) assert isinstance(tm['local_time'], Time) assert tm['local_time'].format == 'mjd' # Default scale is UTC assert tm['local_time'].scale == 'utc' assert (tm['local_time'].value == local_time).all() @pytest.mark.parametrize('table_types', (Table, QTable)) def test_io_time_read_fits_location_warnings(self, table_types): """ Test warnings for time column reference position. """ # Time reference position "TOPOCENTER" without corresponding # observatory position. 
c = fits.Column(name='datetime', format='A29', coord_type='TT', time_ref_pos='TOPOCENTER', array=self.time) bhdu = fits.BinTableHDU.from_columns([c]) bhdu.writeto(self.temp('time.fits'), overwrite=True) with catch_warnings() as w: tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert len(w) == 1 assert ('observatory position is not properly specified' in str(w[0].message)) # Default value for time reference position is "TOPOCENTER" c = fits.Column(name='datetime', format='A29', coord_type='TT', array=self.time) bhdu = fits.BinTableHDU.from_columns([c]) bhdu.writeto(self.temp('time.fits'), overwrite=True) with catch_warnings() as w: tm = table_types.read(self.temp('time.fits'), astropy_native=True) assert len(w) == 1 assert ('"TRPOSn" is not specified. The default value for ' 'it is "TOPOCENTER"' in str(w[0].message))
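# Illustrative sketch (not part of the original test suite): the basic
# Time-column round trip verified by the tests above. The file name is an
# arbitrary placeholder used only for illustration.
def _sketch_time_roundtrip(filename='time_sketch.fits'):
    t = Table()
    t['a'] = Time(['1999-01-01T00:00:00', '2010-01-01T00:00:00'],
                  format='isot', scale='utc')
    t.write(filename, format='fits', overwrite=True)
    # With astropy_native=True the column is read back as a Time object.
    t2 = Table.read(filename, format='fits', astropy_native=True)
    assert isinstance(t2['a'], Time)
    assert t2['a'].scale == 'utc'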
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from . import FitsTestCase
from ..scripts import fitsinfo


class TestFitsinfo(FitsTestCase):

    def test_onefile(self, capsys):
        fitsinfo.main([self.data('arange.fits')])
        out, err = capsys.readouterr()
        out = out.splitlines()
        assert len(out) == 3
        assert out[1].startswith(
            'No. Name Ver Type Cards Dimensions Format')
        assert out[2].startswith(
            ' 0 PRIMARY 1 PrimaryHDU 7 (11, 10, 7) int32')

    def test_multiplefiles(self, capsys):
        fitsinfo.main([self.data('arange.fits'), self.data('ascii.fits')])
        out, err = capsys.readouterr()
        out = out.splitlines()
        assert len(out) == 8
        assert out[1].startswith(
            'No. Name Ver Type Cards Dimensions Format')
        assert out[2].startswith(
            ' 0 PRIMARY 1 PrimaryHDU 7 (11, 10, 7) int32')
        assert out[3] == ''
        assert out[7].startswith(
            ' 1 1 TableHDU 20 5R x 2C [E10.4, I5]')
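# For reference (not part of the test suite): calling fitsinfo.main([...]) as
# above mirrors the installed command-line script, e.g.
#
#     fitsinfo arange.fits ascii.fits
#
# which prints the same per-HDU summary that the assertions above check.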
# Licensed under a 3-clause BSD style license - see PYFITS.rst import platform import pytest import numpy as np from ....io import fits from . import FitsTestCase from ....tests.helper import ignore_warnings class TestUintFunctions(FitsTestCase): @classmethod def setup_class(cls): cls.utypes = ('u2', 'u4', 'u8') cls.utype_map = {'u2': np.uint16, 'u4': np.uint32, 'u8': np.uint64} cls.itype_map = {'u2': np.int16, 'u4': np.int32, 'u8': np.int64} cls.format_map = {'u2': 'I', 'u4': 'J', 'u8': 'K'} # Test of 64-bit compressed image is disabled. cfitsio library doesn't # like it @pytest.mark.parametrize(('utype', 'compressed'), [('u2', False), ('u4', False), ('u8', False), ('u2', True), ('u4', True)]) # ,('u8',True)]) def test_uint(self, utype, compressed): bits = 8*int(utype[1]) if platform.architecture()[0] == '64bit' or bits != 64: if compressed: hdu = fits.CompImageHDU(np.array([-3, -2, -1, 0, 1, 2, 3], dtype=np.int64)) hdu_number = 1 else: hdu = fits.PrimaryHDU(np.array([-3, -2, -1, 0, 1, 2, 3], dtype=np.int64)) hdu_number = 0 hdu.scale('int{0:d}'.format(bits), '', bzero=2 ** (bits-1)) with ignore_warnings(): hdu.writeto(self.temp('tempfile.fits'), overwrite=True) with fits.open(self.temp('tempfile.fits'), uint=True) as hdul: assert hdul[hdu_number].data.dtype == self.utype_map[utype] assert (hdul[hdu_number].data == np.array( [(2 ** bits) - 3, (2 ** bits) - 2, (2 ** bits) - 1, 0, 1, 2, 3], dtype=self.utype_map[utype])).all() hdul.writeto(self.temp('tempfile1.fits')) with fits.open(self.temp('tempfile1.fits'), uint16=True) as hdul1: d1 = hdul[hdu_number].data d2 = hdul1[hdu_number].data assert (d1 == d2).all() if not compressed: # TODO: Enable these lines if CompImageHDUs ever grow # .section support sec = hdul[hdu_number].section[:1] assert sec.dtype.name == 'uint{}'.format(bits) assert (sec == d1[:1]).all() @pytest.mark.parametrize('utype', ('u2', 'u4', 'u8')) def test_uint_columns(self, utype): """Test basic functionality of tables with columns containing pseudo-unsigned integers. See https://github.com/astropy/astropy/pull/906 """ bits = 8*int(utype[1]) if platform.architecture()[0] == '64bit' or bits != 64: bzero = self.utype_map[utype](2**(bits-1)) one = self.utype_map[utype](1) u0 = np.arange(bits+1, dtype=self.utype_map[utype]) u = 2**u0 - one if bits == 64: u[63] = bzero - one u[64] = u[63] + u[63] + one uu = (u - bzero).view(self.itype_map[utype]) # Construct a table from explicit column col = fits.Column(name=utype, array=u, format=self.format_map[utype], bzero=bzero) table = fits.BinTableHDU.from_columns([col]) assert (table.data[utype] == u).all() # This used to be table.data.base, but now after adding a table to # a BinTableHDU it gets stored as a view of the original table, # even if the original was already a FITS_rec. So now we need # table.data.base.base assert (table.data.base.base[utype] == uu).all() hdu0 = fits.PrimaryHDU() hdulist = fits.HDUList([hdu0, table]) with ignore_warnings(): hdulist.writeto(self.temp('tempfile.fits'), overwrite=True) # Test write of unsigned int del hdulist with fits.open(self.temp('tempfile.fits'), uint=True) as hdulist2: hdudata = hdulist2[1].data assert (hdudata[utype] == u).all() assert (hdudata[utype].dtype == self.utype_map[utype]) assert (hdudata.base[utype] == uu).all() # Construct recarray then write out that. 
v = u.view(dtype=[(utype, self.utype_map[utype])]) with ignore_warnings(): fits.writeto(self.temp('tempfile2.fits'), v, overwrite=True) with fits.open(self.temp('tempfile2.fits'), uint=True) as hdulist3: hdudata3 = hdulist3[1].data assert (hdudata3.base[utype] == table.data.base.base[utype]).all() assert (hdudata3[utype] == table.data[utype]).all() assert (hdudata3[utype] == u).all()
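
# Minimal sketch (not part of the original test module, not collected by
# pytest) of the pseudo-unsigned integer convention exercised above: FITS has
# no unsigned-integer BITPIX, so unsigned data are stored as signed integers
# offset by BZERO = 2**(bits - 1), and opening with uint=True undoes the
# offset. The temporary filename is a placeholder.
def _demo_pseudo_unsigned(filename='uint_demo.fits'):
    import numpy as np
    from astropy.io import fits

    u = np.array([0, 1, 2 ** 16 - 1], dtype=np.uint16)
    fits.PrimaryHDU(u).writeto(filename, overwrite=True)

    # On disk the values are 16-bit signed integers plus BZERO = 32768 ...
    with fits.open(filename, do_not_scale_image_data=True) as hdul:
        assert hdul[0].header['BZERO'] == 32768
        assert hdul[0].data.dtype.itemsize == 2

    # ... and uint=True reconstructs the original unsigned values.
    with fits.open(filename, uint=True) as hdul:
        assert hdul[0].data.dtype == np.uint16
        assert (hdul[0].data == u).all()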
6ddf130ee3a0d62ead35af111ecfcfa1e661c943c5d5543fe584a0cf50490b8d
# Licensed under a 3-clause BSD style license - see PYFITS.rst import math import os import platform import re import time import warnings import pytest import numpy as np from numpy.testing import assert_equal from ....io import fits from ....utils.exceptions import AstropyPendingDeprecationWarning from ....utils.compat import NUMPY_LT_1_12 from ....tests.helper import raises, catch_warnings, ignore_warnings from ..hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM from .test_table import comparerecords from . import FitsTestCase try: import scipy # pylint: disable=W0611 except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True class TestImageFunctions(FitsTestCase): def test_constructor_name_arg(self): """Like the test of the same name in test_table.py""" hdu = fits.ImageHDU() assert hdu.name == '' assert 'EXTNAME' not in hdu.header hdu.name = 'FOO' assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # Passing name to constructor hdu = fits.ImageHDU(name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' # And overriding a header with a different extname hdr = fits.Header() hdr['EXTNAME'] = 'EVENTS' hdu = fits.ImageHDU(header=hdr, name='FOO') assert hdu.name == 'FOO' assert hdu.header['EXTNAME'] == 'FOO' def test_constructor_ver_arg(self): def assert_ver_is(hdu, reference_ver): assert hdu.ver == reference_ver assert hdu.header['EXTVER'] == reference_ver hdu = fits.ImageHDU() assert hdu.ver == 1 # defaults to 1 assert 'EXTVER' not in hdu.header hdu.ver = 1 assert_ver_is(hdu, 1) # Passing name to constructor hdu = fits.ImageHDU(ver=2) assert_ver_is(hdu, 2) # And overriding a header with a different extver hdr = fits.Header() hdr['EXTVER'] = 3 hdu = fits.ImageHDU(header=hdr, ver=4) assert_ver_is(hdu, 4) # The header card is not overridden if ver is None or not passed in hdr = fits.Header() hdr['EXTVER'] = 5 hdu = fits.ImageHDU(header=hdr, ver=None) assert_ver_is(hdu, 5) hdu = fits.ImageHDU(header=hdr) assert_ver_is(hdu, 5) def test_constructor_copies_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153 Ensure that a header from one HDU is copied when used to initialize new HDU. """ ifd = fits.HDUList(fits.PrimaryHDU()) phdr = ifd[0].header phdr['FILENAME'] = 'labq01i3q_rawtag.fits' primary_hdu = fits.PrimaryHDU(header=phdr) ofd = fits.HDUList(primary_hdu) ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits' # Original header should be unchanged assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits' def test_open(self): # The function "open" reads a FITS file into an HDUList object. There # are three modes to open: "readonly" (the default), "append", and # "update". # Open a file read-only (the default mode), the content of the FITS # file are read into memory. r = fits.open(self.data('test0.fits')) # readonly # data parts are latent instantiation, so if we close the HDUList # without touching data, data can not be accessed. 
r.close() with pytest.raises(IndexError) as exc_info: r[1].data[:2, :2] # Check that the exception message is the enhanced version, not the # default message from list.__getitem__ assert str(exc_info.value) == ('HDU not found, possibly because the index ' 'is out of range, or because the file was ' 'closed before all HDUs were read') def test_open_2(self): r = fits.open(self.data('test0.fits')) info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] + [(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '') for x in range(1, 5)]) try: assert r.info(output=False) == info finally: r.close() def test_open_3(self): # Test that HDUs cannot be accessed after the file was closed r = fits.open(self.data('test0.fits')) r.close() with pytest.raises(IndexError) as exc_info: r[1] # Check that the exception message is the enhanced version, not the # default message from list.__getitem__ assert str(exc_info.value) == ('HDU not found, possibly because the index ' 'is out of range, or because the file was ' 'closed before all HDUs were read') # Test that HDUs can be accessed with lazy_load_hdus=False r = fits.open(self.data('test0.fits'), lazy_load_hdus=False) r.close() assert isinstance(r[1], fits.ImageHDU) assert len(r) == 5 with pytest.raises(IndexError) as exc_info: r[6] assert str(exc_info.value) == 'list index out of range' # And the same with the global config item assert fits.conf.lazy_load_hdus # True by default fits.conf.lazy_load_hdus = False try: r = fits.open(self.data('test0.fits')) r.close() assert isinstance(r[1], fits.ImageHDU) assert len(r) == 5 finally: fits.conf.lazy_load_hdus = True def test_primary_with_extname(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151 Tests that the EXTNAME keyword works with Primary HDUs as well, and interacts properly with the .name attribute. For convenience hdulist['PRIMARY'] will still refer to the first HDU even if it has an EXTNAME not equal to 'PRIMARY'. """ prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)]) hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)]) assert 'EXTNAME' in hdul[0].header assert hdul[0].name == 'XPRIMARY' assert hdul[0].name == hdul[0].header['EXTNAME'] info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')] assert hdul.info(output=False) == info assert hdul['PRIMARY'] is hdul['XPRIMARY'] assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)] hdul[0].name = 'XPRIMARY2' assert hdul[0].header['EXTNAME'] == 'XPRIMARY2' hdul.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[0].name == 'XPRIMARY2' @pytest.mark.xfail(platform.system() == 'Windows' and not NUMPY_LT_1_12, reason='https://github.com/astropy/astropy/issues/5797') def test_io_manipulation(self): # Get a keyword value. An extension can be referred by name or by # number. Both extension and keyword names are case insensitive. with fits.open(self.data('test0.fits')) as r: assert r['primary'].header['naxis'] == 0 assert r[0].header['naxis'] == 0 # If there are more than one extension with the same EXTNAME value, # the EXTVER can be used (as the second argument) to distinguish # the extension. 
assert r['sci', 1].header['detector'] == 1 # append (using "update()") a new card r[0].header['xxx'] = 1.234e56 assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) == "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n" "FILENAME= 'vtest3.fits' / File name \n" "XXX = 1.234E+56 ") # rename a keyword r[0].header.rename_keyword('filename', 'fname') pytest.raises(ValueError, r[0].header.rename_keyword, 'fname', 'history') pytest.raises(ValueError, r[0].header.rename_keyword, 'fname', 'simple') r[0].header.rename_keyword('fname', 'filename') # get a subsection of data assert np.array_equal(r[2].data[:3, :3], np.array([[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16)) # We can create a new FITS file by opening a new file with "append" # mode. with fits.open(self.temp('test_new.fits'), mode='append') as n: # Append the primary header and the 2nd extension to the new # file. n.append(r[0]) n.append(r[2]) # The flush method will write the current HDUList object back # to the newly created file on disk. The HDUList is still open # and can be further operated. n.flush() assert n[1].data[1, 1] == 349 # modify a data point n[1].data[1, 1] = 99 # When the file is closed, the most recent additions of # extension(s) since last flush() will be appended, but any HDU # already existed at the last flush will not be modified del n # If an existing file is opened with "append" mode, like the # readonly mode, the HDU's will be read into the HDUList which can # be modified in memory but can not be written back to the original # file. A file opened with append mode can only add new HDU's. os.rename(self.temp('test_new.fits'), self.temp('test_append.fits')) with fits.open(self.temp('test_append.fits'), mode='append') as a: # The above change did not take effect since this was made # after the flush(). assert a[1].data[1, 1] == 349 a.append(r[1]) del a # When changes are made to an HDUList which was opened with # "update" mode, they will be written back to the original file # when a flush/close is called. os.rename(self.temp('test_append.fits'), self.temp('test_update.fits')) with fits.open(self.temp('test_update.fits'), mode='update') as u: # When the changes do not alter the size structures of the # original (or since last flush) HDUList, the changes are # written back "in place". assert u[0].header['rootname'] == 'U2EQ0201T' u[0].header['rootname'] = 'abc' assert u[1].data[1, 1] == 349 u[1].data[1, 1] = 99 u.flush() # If the changes affect the size structure, e.g. adding or # deleting HDU(s), header was expanded or reduced beyond # existing number of blocks (2880 bytes in each block), or # change the data size, the HDUList is written to a temporary # file, the original file is deleted, and the temporary file is # renamed to the original file name and reopened in the update # mode. To a user, these two kinds of updating writeback seem # to be the same, unless the optional argument in flush or # close is set to 1. del u[2] u.flush() # the write method in HDUList class writes the current HDUList, # with all changes made up to now, to a new file. This method # works the same disregard the mode the HDUList was opened # with. u.append(r[3]) u.writeto(self.temp('test_new.fits')) del u # Another useful new HDUList method is readall. It will "touch" the # data parts in all HDUs, so even if the HDUList is closed, we can # still operate on the data. 
with fits.open(self.data('test0.fits')) as r: r.readall() assert r[1].data[1, 1] == 315 # create an HDU with data only data = np.ones((3, 5), dtype=np.float32) hdu = fits.ImageHDU(data=data, name='SCI') assert np.array_equal(hdu.data, np.array([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=np.float32)) # create an HDU with header and data # notice that the header has the right NAXIS's since it is constructed # with ImageHDU hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype='int32')) assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) == "BITPIX = 32 / array data type \n" "NAXIS = 1 / number of array dimensions \n" "NAXIS1 = 2 \n" "PCOUNT = 0 / number of parameters ") def test_memory_mapping(self): # memory mapping f1 = fits.open(self.data('test0.fits'), memmap=1) f1.close() def test_verification_on_output(self): # verification on output # make a defect HDUList first x = fits.ImageHDU() hdu = fits.HDUList(x) # HDUList can take a list or one single HDU with catch_warnings() as w: hdu.verify() text = "HDUList's 0th element is not a primary HDU." assert len(w) == 3 assert text in str(w[1].message) with catch_warnings() as w: hdu.writeto(self.temp('test_new2.fits'), 'fix') text = ("HDUList's 0th element is not a primary HDU. " "Fixed by inserting one as 0th HDU.") assert len(w) == 3 assert text in str(w[1].message) def test_section(self): # section testing fs = fits.open(self.data('arange.fits')) assert np.array_equal(fs[0].section[3, 2, 5], 357) assert np.array_equal( fs[0].section[3, 2, :], np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362])) assert np.array_equal(fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362])) assert np.array_equal(fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359])) assert np.array_equal(fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359])) assert np.array_equal( fs[0].section[3, 2:5, :], np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362], [363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373], [374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]])) assert np.array_equal(fs[0].section[3, :, :][:3, :3], np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]])) dat = fs[0].data assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8]) assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3]) assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3], np.array([[[330, 331, 332], [341, 342, 343], [352, 353, 354]], [[440, 441, 442], [451, 452, 453], [462, 463, 464]], [[550, 551, 552], [561, 562, 563], [572, 573, 574]]])) assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2], np.array([[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]])) assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :]) assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :]) assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :]) assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :]) assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2]) assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3]) bool_index = np.array([True, False, True, True, False, False, True, True, False, True]) assert np.array_equal(fs[0].section[:, bool_index, :], dat[:, bool_index, :]) assert np.array_equal( fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...]) assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2]) assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3]) 
def test_section_data_single(self): a = np.array([1]) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) sec = hdul[0].section dat = hdul[0].data assert np.array_equal(sec[0], dat[0]) assert np.array_equal(sec[...], dat[...]) assert np.array_equal(sec[..., 0], dat[..., 0]) assert np.array_equal(sec[0, ...], dat[0, ...]) def test_section_data_square(self): a = np.arange(4).reshape(2, 2) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) d = hdul[0] dat = hdul[0].data assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() def test_section_data_cube(self): a = np.arange(18).reshape(2, 3, 3) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) d = hdul[0] dat = hdul[0].data # TODO: Generate these perumtions instead of having them all written # out, yeesh! assert (d.section[:, :, :] == dat[:, :, :]).all() assert (d.section[:, :] == dat[:, :]).all() assert (d.section[:] == dat[:]).all() assert (d.section[0, :, :] == dat[0, :, :]).all() assert (d.section[1, :, :] == dat[1, :, :]).all() assert (d.section[0, 0, :] == dat[0, 0, :]).all() assert (d.section[0, 1, :] == dat[0, 1, :]).all() assert (d.section[0, 2, :] == dat[0, 2, :]).all() assert (d.section[1, 0, :] == dat[1, 0, :]).all() assert (d.section[1, 1, :] == dat[1, 1, :]).all() assert (d.section[1, 2, :] == dat[1, 2, :]).all() assert (d.section[0, 0, 0] == dat[0, 0, 0]).all() assert (d.section[0, 0, 1] == dat[0, 0, 1]).all() assert (d.section[0, 0, 2] == dat[0, 0, 2]).all() assert (d.section[0, 1, 0] == dat[0, 1, 0]).all() assert (d.section[0, 1, 1] == dat[0, 1, 1]).all() assert (d.section[0, 1, 2] == dat[0, 1, 2]).all() assert (d.section[0, 2, 0] == dat[0, 2, 0]).all() assert (d.section[0, 2, 1] == dat[0, 2, 1]).all() assert (d.section[0, 2, 2] == dat[0, 2, 2]).all() assert (d.section[1, 0, 0] == dat[1, 0, 0]).all() assert (d.section[1, 0, 1] == dat[1, 0, 1]).all() assert (d.section[1, 0, 2] == dat[1, 0, 2]).all() assert (d.section[1, 1, 0] == dat[1, 1, 0]).all() assert (d.section[1, 1, 1] == dat[1, 1, 1]).all() assert (d.section[1, 1, 2] == dat[1, 1, 2]).all() assert (d.section[1, 2, 0] == dat[1, 2, 0]).all() assert (d.section[1, 2, 1] == dat[1, 2, 1]).all() assert (d.section[1, 2, 2] == dat[1, 2, 2]).all() assert (d.section[:, 0, 0] == dat[:, 0, 0]).all() assert (d.section[:, 0, 1] == dat[:, 0, 1]).all() assert (d.section[:, 0, 2] == dat[:, 0, 2]).all() assert (d.section[:, 1, 0] == dat[:, 1, 0]).all() assert (d.section[:, 1, 1] == dat[:, 1, 1]).all() assert (d.section[:, 1, 2] == dat[:, 1, 2]).all() assert (d.section[:, 2, 0] == dat[:, 2, 0]).all() assert (d.section[:, 2, 1] == dat[:, 2, 1]).all() assert (d.section[:, 2, 2] == dat[:, 2, 2]).all() assert (d.section[0, :, 0] == dat[0, :, 0]).all() assert (d.section[0, :, 1] == dat[0, :, 1]).all() assert (d.section[0, :, 2] == dat[0, :, 2]).all() assert (d.section[1, :, 0] == dat[1, 
:, 0]).all() assert (d.section[1, :, 1] == dat[1, :, 1]).all() assert (d.section[1, :, 2] == dat[1, :, 2]).all() assert (d.section[:, :, 0] == dat[:, :, 0]).all() assert (d.section[:, :, 1] == dat[:, :, 1]).all() assert (d.section[:, :, 2] == dat[:, :, 2]).all() assert (d.section[:, 0, :] == dat[:, 0, :]).all() assert (d.section[:, 1, :] == dat[:, 1, :]).all() assert (d.section[:, 2, :] == dat[:, 2, :]).all() assert (d.section[:, :, 0:1] == dat[:, :, 0:1]).all() assert (d.section[:, :, 0:2] == dat[:, :, 0:2]).all() assert (d.section[:, :, 0:3] == dat[:, :, 0:3]).all() assert (d.section[:, :, 1:2] == dat[:, :, 1:2]).all() assert (d.section[:, :, 1:3] == dat[:, :, 1:3]).all() assert (d.section[:, :, 2:3] == dat[:, :, 2:3]).all() assert (d.section[0:1, 0:1, 0:1] == dat[0:1, 0:1, 0:1]).all() assert (d.section[0:1, 0:1, 0:2] == dat[0:1, 0:1, 0:2]).all() assert (d.section[0:1, 0:1, 0:3] == dat[0:1, 0:1, 0:3]).all() assert (d.section[0:1, 0:1, 1:2] == dat[0:1, 0:1, 1:2]).all() assert (d.section[0:1, 0:1, 1:3] == dat[0:1, 0:1, 1:3]).all() assert (d.section[0:1, 0:1, 2:3] == dat[0:1, 0:1, 2:3]).all() assert (d.section[0:1, 0:2, 0:1] == dat[0:1, 0:2, 0:1]).all() assert (d.section[0:1, 0:2, 0:2] == dat[0:1, 0:2, 0:2]).all() assert (d.section[0:1, 0:2, 0:3] == dat[0:1, 0:2, 0:3]).all() assert (d.section[0:1, 0:2, 1:2] == dat[0:1, 0:2, 1:2]).all() assert (d.section[0:1, 0:2, 1:3] == dat[0:1, 0:2, 1:3]).all() assert (d.section[0:1, 0:2, 2:3] == dat[0:1, 0:2, 2:3]).all() assert (d.section[0:1, 0:3, 0:1] == dat[0:1, 0:3, 0:1]).all() assert (d.section[0:1, 0:3, 0:2] == dat[0:1, 0:3, 0:2]).all() assert (d.section[0:1, 0:3, 0:3] == dat[0:1, 0:3, 0:3]).all() assert (d.section[0:1, 0:3, 1:2] == dat[0:1, 0:3, 1:2]).all() assert (d.section[0:1, 0:3, 1:3] == dat[0:1, 0:3, 1:3]).all() assert (d.section[0:1, 0:3, 2:3] == dat[0:1, 0:3, 2:3]).all() assert (d.section[0:1, 1:2, 0:1] == dat[0:1, 1:2, 0:1]).all() assert (d.section[0:1, 1:2, 0:2] == dat[0:1, 1:2, 0:2]).all() assert (d.section[0:1, 1:2, 0:3] == dat[0:1, 1:2, 0:3]).all() assert (d.section[0:1, 1:2, 1:2] == dat[0:1, 1:2, 1:2]).all() assert (d.section[0:1, 1:2, 1:3] == dat[0:1, 1:2, 1:3]).all() assert (d.section[0:1, 1:2, 2:3] == dat[0:1, 1:2, 2:3]).all() assert (d.section[0:1, 1:3, 0:1] == dat[0:1, 1:3, 0:1]).all() assert (d.section[0:1, 1:3, 0:2] == dat[0:1, 1:3, 0:2]).all() assert (d.section[0:1, 1:3, 0:3] == dat[0:1, 1:3, 0:3]).all() assert (d.section[0:1, 1:3, 1:2] == dat[0:1, 1:3, 1:2]).all() assert (d.section[0:1, 1:3, 1:3] == dat[0:1, 1:3, 1:3]).all() assert (d.section[0:1, 1:3, 2:3] == dat[0:1, 1:3, 2:3]).all() assert (d.section[1:2, 0:1, 0:1] == dat[1:2, 0:1, 0:1]).all() assert (d.section[1:2, 0:1, 0:2] == dat[1:2, 0:1, 0:2]).all() assert (d.section[1:2, 0:1, 0:3] == dat[1:2, 0:1, 0:3]).all() assert (d.section[1:2, 0:1, 1:2] == dat[1:2, 0:1, 1:2]).all() assert (d.section[1:2, 0:1, 1:3] == dat[1:2, 0:1, 1:3]).all() assert (d.section[1:2, 0:1, 2:3] == dat[1:2, 0:1, 2:3]).all() assert (d.section[1:2, 0:2, 0:1] == dat[1:2, 0:2, 0:1]).all() assert (d.section[1:2, 0:2, 0:2] == dat[1:2, 0:2, 0:2]).all() assert (d.section[1:2, 0:2, 0:3] == dat[1:2, 0:2, 0:3]).all() assert (d.section[1:2, 0:2, 1:2] == dat[1:2, 0:2, 1:2]).all() assert (d.section[1:2, 0:2, 1:3] == dat[1:2, 0:2, 1:3]).all() assert (d.section[1:2, 0:2, 2:3] == dat[1:2, 0:2, 2:3]).all() assert (d.section[1:2, 0:3, 0:1] == dat[1:2, 0:3, 0:1]).all() assert (d.section[1:2, 0:3, 0:2] == dat[1:2, 0:3, 0:2]).all() assert (d.section[1:2, 0:3, 0:3] == dat[1:2, 0:3, 0:3]).all() assert (d.section[1:2, 0:3, 
1:2] == dat[1:2, 0:3, 1:2]).all() assert (d.section[1:2, 0:3, 1:3] == dat[1:2, 0:3, 1:3]).all() assert (d.section[1:2, 0:3, 2:3] == dat[1:2, 0:3, 2:3]).all() assert (d.section[1:2, 1:2, 0:1] == dat[1:2, 1:2, 0:1]).all() assert (d.section[1:2, 1:2, 0:2] == dat[1:2, 1:2, 0:2]).all() assert (d.section[1:2, 1:2, 0:3] == dat[1:2, 1:2, 0:3]).all() assert (d.section[1:2, 1:2, 1:2] == dat[1:2, 1:2, 1:2]).all() assert (d.section[1:2, 1:2, 1:3] == dat[1:2, 1:2, 1:3]).all() assert (d.section[1:2, 1:2, 2:3] == dat[1:2, 1:2, 2:3]).all() assert (d.section[1:2, 1:3, 0:1] == dat[1:2, 1:3, 0:1]).all() assert (d.section[1:2, 1:3, 0:2] == dat[1:2, 1:3, 0:2]).all() assert (d.section[1:2, 1:3, 0:3] == dat[1:2, 1:3, 0:3]).all() assert (d.section[1:2, 1:3, 1:2] == dat[1:2, 1:3, 1:2]).all() assert (d.section[1:2, 1:3, 1:3] == dat[1:2, 1:3, 1:3]).all() assert (d.section[1:2, 1:3, 2:3] == dat[1:2, 1:3, 2:3]).all() def test_section_data_four(self): a = np.arange(256).reshape(4, 4, 4, 4) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) d = hdul[0] dat = hdul[0].data assert (d.section[:, :, :, :] == dat[:, :, :, :]).all() assert (d.section[:, :, :] == dat[:, :, :]).all() assert (d.section[:, :] == dat[:, :]).all() assert (d.section[:] == dat[:]).all() assert (d.section[0, :, :, :] == dat[0, :, :, :]).all() assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all() assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all() assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all() assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all() def test_section_data_scaled(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143 This is like test_section_data_square but uses a file containing scaled image data, to test that sections can work correctly with scaled data. 
""" hdul = fits.open(self.data('scale.fits')) d = hdul[0] dat = hdul[0].data assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() # Test without having accessed the full data first hdul = fits.open(self.data('scale.fits')) d = hdul[0] assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() assert not d._data_loaded def test_do_not_scale_image_data(self): hdul = fits.open(self.data('scale.fits'), do_not_scale_image_data=True) assert hdul[0].data.dtype == np.dtype('>i2') hdul = fits.open(self.data('scale.fits')) assert hdul[0].data.dtype == np.dtype('float32') def test_append_uint_data(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56 (BZERO and BSCALE added in the wrong location when appending scaled data) """ fits.writeto(self.temp('test_new.fits'), data=np.array([], dtype='uint8')) d = np.zeros([100, 100]).astype('uint16') fits.append(self.temp('test_new.fits'), data=d) f = fits.open(self.temp('test_new.fits'), uint=True) assert f[1].data.dtype == 'uint16' def test_scale_with_explicit_bzero_bscale(self): """ Regression test for https://github.com/astropy/astropy/issues/6399 """ hdu1 = fits.PrimaryHDU() hdu2 = fits.ImageHDU(np.random.rand(100,100)) # The line below raised an exception in astropy 2.0, so if it does not # raise an error here, that is progress. hdu2.scale(type='uint8', bscale=1, bzero=0) def test_uint_header_consistency(self): """ Regression test for https://github.com/astropy/astropy/issues/2305 This ensures that an HDU containing unsigned integer data always has the apppriate BZERO value in its header. 
""" for int_size in (16, 32, 64): # Just make an array of some unsigned ints that wouldn't fit in a # signed int array of the same bit width max_uint = (2 ** int_size) - 1 if int_size == 64: max_uint = np.uint64(int_size) dtype = 'uint{}'.format(int_size) arr = np.empty(100, dtype=dtype) arr.fill(max_uint) arr -= np.arange(100, dtype=dtype) uint_hdu = fits.PrimaryHDU(data=arr) assert np.all(uint_hdu.data == arr) assert uint_hdu.data.dtype.name == 'uint{}'.format(int_size) assert 'BZERO' in uint_hdu.header assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1)) filename = 'uint{}.fits'.format(int_size) uint_hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename), uint=True) as hdul: new_uint_hdu = hdul[0] assert np.all(new_uint_hdu.data == arr) assert new_uint_hdu.data.dtype.name == 'uint{}'.format(int_size) assert 'BZERO' in new_uint_hdu.header assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1)) @pytest.mark.parametrize(('from_file'), (False, True)) @pytest.mark.parametrize(('do_not_scale'), (False,)) def test_uint_header_keywords_removed_after_bitpix_change(self, from_file, do_not_scale): """ Regression test for https://github.com/astropy/astropy/issues/4974 BZERO/BSCALE should be removed if data is converted to a floating point type. Currently excluding the case where do_not_scale_image_data=True because it is not clear what the expectation should be. """ arr = np.zeros(100, dtype='uint16') if from_file: # To generate the proper input file we always want to scale the # data before writing it...otherwise when we open it will be # regular (signed) int data. tmp_uint = fits.PrimaryHDU(arr) filename = 'unsigned_int.fits' tmp_uint.writeto(self.temp(filename)) with fits.open(self.temp(filename), do_not_scale_image_data=do_not_scale) as f: uint_hdu = f[0] # Force a read before we close. _ = uint_hdu.data else: uint_hdu = fits.PrimaryHDU(arr, do_not_scale_image_data=do_not_scale) # Make sure appropriate keywords are in the header. See # https://github.com/astropy/astropy/pull/3916#issuecomment-122414532 # for discussion. assert 'BSCALE' in uint_hdu.header assert 'BZERO' in uint_hdu.header assert uint_hdu.header['BSCALE'] == 1 assert uint_hdu.header['BZERO'] == 32768 # Convert data to floating point... uint_hdu.data = uint_hdu.data * 1.0 # ...bitpix should be negative. assert uint_hdu.header['BITPIX'] < 0 # BSCALE and BZERO should NOT be in header any more. assert 'BSCALE' not in uint_hdu.header assert 'BZERO' not in uint_hdu.header # This is the main test...the data values should round trip # as zero. filename = 'test_uint_to_float.fits' uint_hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename)) as hdul: assert (hdul[0].data == 0).all() def test_blanks(self): """Test image data with blank spots in it (which should show up as NaNs in the data array. """ arr = np.zeros((10, 10), dtype=np.int32) # One row will be blanks arr[1] = 999 hdu = fits.ImageHDU(data=arr) hdu.header['BLANK'] = 999 hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) assert np.isnan(hdul[1].data[1]).all() def test_invalid_blanks(self): """ Test that invalid use of the BLANK keyword leads to an appropriate warning, and that the BLANK keyword is ignored when returning the HDU data. 
Regression test for https://github.com/astropy/astropy/issues/3865 """ arr = np.arange(5, dtype=np.float64) hdu = fits.PrimaryHDU(data=arr) hdu.header['BLANK'] = 2 with catch_warnings() as w: hdu.writeto(self.temp('test_new.fits')) # Allow the HDU to be written, but there should be a warning # when writing a header with BLANK when then data is not # int assert len(w) == 1 assert "Invalid 'BLANK' keyword in header" in str(w[0].message) # Should also get a warning when opening the file, and the BLANK # value should not be applied with catch_warnings() as w: with fits.open(self.temp('test_new.fits')) as h: assert len(w) == 1 assert "Invalid 'BLANK' keyword in header" in str(w[0].message) assert np.all(arr == h[0].data) def test_scale_back_with_blanks(self): """ Test that when auto-rescaling integer data with "blank" values (where the blanks are replaced by NaN in the float data), that the "BLANK" keyword is removed from the header. Further, test that when using the ``scale_back=True`` option the blank values are restored properly. Regression test for https://github.com/astropy/astropy/issues/3865 """ # Make the sample file arr = np.arange(5, dtype=np.int32) hdu = fits.PrimaryHDU(data=arr) hdu.scale('int16', bscale=1.23) # Creating data that uses BLANK is currently kludgy--a separate issue # TODO: Rewrite this test when scaling with blank support is better # supported # Let's just add a value to the data that should be converted to NaN # when it is read back in: hdu.data[0] = 9999 hdu.header['BLANK'] = 9999 hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: data = hdul[0].data assert np.isnan(data[0]) hdul.writeto(self.temp('test2.fits')) # Now reopen the newly written file. It should not have a 'BLANK' # keyword with catch_warnings() as w: with fits.open(self.temp('test2.fits')) as hdul2: assert len(w) == 0 assert 'BLANK' not in hdul2[0].header data = hdul2[0].data assert np.isnan(data[0]) # Finally, test that scale_back keeps the BLANKs correctly with fits.open(self.temp('test.fits'), scale_back=True, mode='update') as hdul3: data = hdul3[0].data assert np.isnan(data[0]) with fits.open(self.temp('test.fits'), do_not_scale_image_data=True) as hdul4: assert hdul4[0].header['BLANK'] == 9999 assert hdul4[0].header['BSCALE'] == 1.23 assert hdul4[0].data[0] == 9999 def test_bzero_with_floats(self): """Test use of the BZERO keyword in an image HDU containing float data. 
""" arr = np.zeros((10, 10)) - 1 hdu = fits.ImageHDU(data=arr) hdu.header['BZERO'] = 1.0 hdu.writeto(self.temp('test_new.fits')) hdul = fits.open(self.temp('test_new.fits')) arr += 1 assert (hdul[1].data == arr).all() def test_rewriting_large_scaled_image(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101 """ hdul = fits.open(self.data('fixed-1890.fits')) orig_data = hdul[0].data with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), overwrite=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[0].data == orig_data).all() hdul.close() # Just as before, but this time don't touch hdul[0].data before writing # back out--this is the case that failed in # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 hdul = fits.open(self.data('fixed-1890.fits')) with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), overwrite=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[0].data == orig_data).all() hdul.close() # Test opening/closing/reopening a scaled file in update mode hdul = fits.open(self.data('fixed-1890.fits'), do_not_scale_image_data=True) hdul.writeto(self.temp('test_new.fits'), overwrite=True, output_verify='silentfix') hdul.close() hdul = fits.open(self.temp('test_new.fits')) orig_data = hdul[0].data hdul.close() hdul = fits.open(self.temp('test_new.fits'), mode='update') hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[0].data == orig_data).all() hdul = fits.open(self.temp('test_new.fits')) hdul.close() def test_image_update_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105 Replacing the original header to an image HDU and saving should update the NAXISn keywords appropriately and save the image data correctly. """ # Copy the original file before saving to it self.copy_file('test0.fits') with fits.open(self.temp('test0.fits'), mode='update') as hdul: orig_data = hdul[1].data.copy() hdr_copy = hdul[1].header.copy() del hdr_copy['NAXIS*'] hdul[1].header = hdr_copy with fits.open(self.temp('test0.fits')) as hdul: assert (orig_data == hdul[1].data).all() def test_open_scaled_in_update_mode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119 (Don't update scaled image data if the data is not read) This ensures that merely opening and closing a file containing scaled image data does not cause any change to the data (or the header). Changes should only occur if the data is accessed. """ # Copy the original file before making any possible changes to it self.copy_file('scale.fits') mtime = os.stat(self.temp('scale.fits')).st_mtime time.sleep(1) fits.open(self.temp('scale.fits'), mode='update').close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. 
assert mtime == os.stat(self.temp('scale.fits')).st_mtime # Insert a slight delay to ensure the mtime does change when the file # is changed time.sleep(1) hdul = fits.open(self.temp('scale.fits'), 'update') orig_data = hdul[0].data hdul.close() # Now the file should be updated with the rescaled data assert mtime != os.stat(self.temp('scale.fits')).st_mtime hdul = fits.open(self.temp('scale.fits'), mode='update') assert hdul[0].data.dtype == np.dtype('>f4') assert hdul[0].header['BITPIX'] == -32 assert 'BZERO' not in hdul[0].header assert 'BSCALE' not in hdul[0].header assert (orig_data == hdul[0].data).all() # Try reshaping the data, then closing and reopening the file; let's # see if all the changes are preseved properly hdul[0].data.shape = (42, 10) hdul.close() hdul = fits.open(self.temp('scale.fits')) assert hdul[0].shape == (42, 10) assert hdul[0].data.dtype == np.dtype('>f4') assert hdul[0].header['BITPIX'] == -32 assert 'BZERO' not in hdul[0].header assert 'BSCALE' not in hdul[0].header def test_scale_back(self): """A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120 The scale_back feature for image HDUs. """ self.copy_file('scale.fits') with fits.open(self.temp('scale.fits'), mode='update', scale_back=True) as hdul: orig_bitpix = hdul[0].header['BITPIX'] orig_bzero = hdul[0].header['BZERO'] orig_bscale = hdul[0].header['BSCALE'] orig_data = hdul[0].data.copy() hdul[0].data[0] = 0 with fits.open(self.temp('scale.fits'), do_not_scale_image_data=True) as hdul: assert hdul[0].header['BITPIX'] == orig_bitpix assert hdul[0].header['BZERO'] == orig_bzero assert hdul[0].header['BSCALE'] == orig_bscale zero_point = int(math.floor(-orig_bzero / orig_bscale)) assert (hdul[0].data[0] == zero_point).all() with fits.open(self.temp('scale.fits')) as hdul: assert (hdul[0].data[1:] == orig_data[1:]).all() def test_image_none(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/27 """ with fits.open(self.data('test0.fits')) as h: h[1].data h[1].data = None h[1].writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert h[1].data is None assert h[1].header['NAXIS'] == 0 assert 'NAXIS1' not in h[1].header assert 'NAXIS2' not in h[1].header def test_invalid_blank(self): """ Regression test for https://github.com/astropy/astropy/issues/2711 If the BLANK keyword contains an invalid value it should be ignored for any calculations (though a warning should be issued). 
""" data = np.arange(100, dtype=np.float64) hdu = fits.PrimaryHDU(data) hdu.header['BLANK'] = 'nan' hdu.writeto(self.temp('test.fits')) with catch_warnings() as w: with fits.open(self.temp('test.fits')) as hdul: assert np.all(hdul[0].data == data) assert len(w) == 2 msg = "Invalid value for 'BLANK' keyword in header" assert msg in str(w[0].message) msg = "Invalid 'BLANK' keyword" assert msg in str(w[1].message) def test_scaled_image_fromfile(self): """ Regression test for https://github.com/astropy/astropy/issues/2710 """ # Make some sample data a = np.arange(100, dtype=np.float32) hdu = fits.PrimaryHDU(data=a.copy()) hdu.scale(bscale=1.1) hdu.writeto(self.temp('test.fits')) with open(self.temp('test.fits'), 'rb') as f: file_data = f.read() hdul = fits.HDUList.fromstring(file_data) assert np.allclose(hdul[0].data, a) def test_set_data(self): """ Test data assignment - issue #5087 """ im = fits.ImageHDU() ar = np.arange(12) im.data = ar def test_scale_bzero_with_int_data(self): """ Regression test for https://github.com/astropy/astropy/issues/4600 """ a = np.arange(100, 200, dtype=np.int16) hdu1 = fits.PrimaryHDU(data=a.copy()) hdu2 = fits.PrimaryHDU(data=a.copy()) # Previously the following line would throw a TypeError, # now it should be identical to the integer bzero case hdu1.scale('int16', bzero=99.0) hdu2.scale('int16', bzero=99) assert np.allclose(hdu1.data, hdu2.data) def test_scale_back_uint_assignment(self): """ Extend fix for #4600 to assignment to data Suggested by: https://github.com/astropy/astropy/pull/4602#issuecomment-208713748 """ a = np.arange(100, 200, dtype=np.uint16) fits.PrimaryHDU(a).writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), mode="update", scale_back=True) as (hdu,): hdu.data[:] = 0 assert np.allclose(hdu.data, 0) class TestCompressedImage(FitsTestCase): def test_empty(self): """ Regression test for https://github.com/astropy/astropy/issues/2595 """ hdu = fits.CompImageHDU() assert hdu.data is None hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), mode='update') as hdul: assert len(hdul) == 2 assert isinstance(hdul[1], fits.CompImageHDU) assert hdul[1].data is None # Now test replacing the empty data with an array and see what # happens hdul[1].data = np.arange(100, dtype=np.int32) with fits.open(self.temp('test.fits')) as hdul: assert len(hdul) == 2 assert isinstance(hdul[1], fits.CompImageHDU) assert np.all(hdul[1].data == np.arange(100, dtype=np.int32)) @pytest.mark.parametrize( ('data', 'compression_type', 'quantize_level'), [(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16), (np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01), (np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01), (np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16), (np.zeros((10, 10)), 'PLIO_1', 16)]) @pytest.mark.parametrize('byte_order', ['<', '>']) def test_comp_image(self, data, compression_type, quantize_level, byte_order): data = data.newbyteorder(byte_order) primary_hdu = fits.PrimaryHDU() ofd = fits.HDUList(primary_hdu) chdu = fits.CompImageHDU(data, name='SCI', compression_type=compression_type, quantize_level=quantize_level) ofd.append(chdu) ofd.writeto(self.temp('test_new.fits'), overwrite=True) ofd.close() with fits.open(self.temp('test_new.fits')) as fd: assert (fd[1].data == data).all() assert fd[1].header['NAXIS'] == chdu.header['NAXIS'] assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1'] assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2'] assert fd[1].header['BITPIX'] == chdu.header['BITPIX'] 
@pytest.mark.skipif('not HAS_SCIPY') def test_comp_image_quantize_level(self): """ Regression test for https://github.com/astropy/astropy/issues/5969 Test that quantize_level is used. """ import scipy.misc np.random.seed(42) data = scipy.misc.ascent() + np.random.randn(512, 512)*10 fits.ImageHDU(data).writeto(self.temp('im1.fits')) fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1, quantize_level=-1, dither_seed=5)\ .writeto(self.temp('im2.fits')) fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1, quantize_level=-100, dither_seed=5)\ .writeto(self.temp('im3.fits')) im1 = fits.getdata(self.temp('im1.fits')) im2 = fits.getdata(self.temp('im2.fits')) im3 = fits.getdata(self.temp('im3.fits')) assert not np.array_equal(im2, im3) assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3) assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3) assert np.isclose(np.min(im1 - im3), -50, atol=1e-1) assert np.isclose(np.max(im1 - im3), 50, atol=1e-1) def test_comp_image_hcompression_1_invalid_data(self): """ Tests compression with the HCOMPRESS_1 algorithm with data that is not 2D and has a non-2D tile size. """ pytest.raises(ValueError, fits.CompImageHDU, np.zeros((2, 10, 10), dtype=np.float32), name='SCI', compression_type='HCOMPRESS_1', quantize_level=16, tile_size=[2, 10, 10]) def test_comp_image_hcompress_image_stack(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171 Tests that data containing more than two dimensions can be compressed with HCOMPRESS_1 so long as the user-supplied tile size can be flattened to two dimensions. """ cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10) hdu = fits.CompImageHDU(data=cube, name='SCI', compression_type='HCOMPRESS_1', quantize_level=16, tile_size=[5, 5, 1]) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: # HCOMPRESSed images are allowed to deviate from the original by # about 1/quantize_level of the RMS in each tile. assert np.abs(hdul['SCI'].data - cube).max() < 1./15. def test_subtractive_dither_seed(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/32 Ensure that when floating point data is compressed with the SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed is added to the header, and that the data can be correctly decompressed. 
""" array = np.arange(100.0).reshape(10, 10) csum = (array[0].view('uint8').sum() % 10000) + 1 hdu = fits.CompImageHDU(data=array, quantize_method=SUBTRACTIVE_DITHER_1, dither_seed=DITHER_SEED_CHECKSUM) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert isinstance(hdul[1], fits.CompImageHDU) assert 'ZQUANTIZ' in hdul[1]._header assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1' assert 'ZDITHER0' in hdul[1]._header assert hdul[1]._header['ZDITHER0'] == csum assert np.all(hdul[1].data == array) def test_disable_image_compression(self): with catch_warnings(): # No warnings should be displayed in this case warnings.simplefilter('error') with fits.open(self.data('comp.fits'), disable_image_compression=True) as hdul: # The compressed image HDU should show up as a BinTableHDU, but # *not* a CompImageHDU assert isinstance(hdul[1], fits.BinTableHDU) assert not isinstance(hdul[1], fits.CompImageHDU) with fits.open(self.data('comp.fits')) as hdul: assert isinstance(hdul[1], fits.CompImageHDU) def test_open_comp_image_in_update_mode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167 Similar to test_open_scaled_in_update_mode(), but specifically for compressed images. """ # Copy the original file before making any possible changes to it self.copy_file('comp.fits') mtime = os.stat(self.temp('comp.fits')).st_mtime time.sleep(1) fits.open(self.temp('comp.fits'), mode='update').close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. assert mtime == os.stat(self.temp('comp.fits')).st_mtime def test_open_scaled_in_update_mode_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2 Identical to test_open_scaled_in_update_mode() but with a compressed version of the scaled image. """ # Copy+compress the original file before making any possible changes to # it with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp('scale.fits')) mtime = os.stat(self.temp('scale.fits')).st_mtime time.sleep(1) fits.open(self.temp('scale.fits'), mode='update').close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. 
assert mtime == os.stat(self.temp('scale.fits')).st_mtime # Insert a slight delay to ensure the mtime does change when the file # is changed time.sleep(1) hdul = fits.open(self.temp('scale.fits'), 'update') hdul[1].data hdul.close() # Now the file should be updated with the rescaled data assert mtime != os.stat(self.temp('scale.fits')).st_mtime hdul = fits.open(self.temp('scale.fits'), mode='update') assert hdul[1].data.dtype == np.dtype('float32') assert hdul[1].header['BITPIX'] == -32 assert 'BZERO' not in hdul[1].header assert 'BSCALE' not in hdul[1].header # Try reshaping the data, then closing and reopening the file; let's # see if all the changes are preseved properly hdul[1].data.shape = (42, 10) hdul.close() hdul = fits.open(self.temp('scale.fits')) assert hdul[1].shape == (42, 10) assert hdul[1].data.dtype == np.dtype('float32') assert hdul[1].header['BITPIX'] == -32 assert 'BZERO' not in hdul[1].header assert 'BSCALE' not in hdul[1].header def test_write_comp_hdu_direct_from_existing(self): with fits.open(self.data('comp.fits')) as hdul: hdul[1].writeto(self.temp('test.fits')) with fits.open(self.data('comp.fits')) as hdul1: with fits.open(self.temp('test.fits')) as hdul2: assert np.all(hdul1[1].data == hdul2[1].data) assert comparerecords(hdul1[1].compressed_data, hdul2[1].compressed_data) def test_rewriting_large_scaled_image_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1 Identical to test_rewriting_large_scaled_image() but with a compressed image. """ with fits.open(self.data('fixed-1890.fits'), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp('fixed-1890-z.fits')) hdul = fits.open(self.temp('fixed-1890-z.fits')) orig_data = hdul[1].data with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), overwrite=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[1].data == orig_data).all() hdul.close() # Just as before, but this time don't touch hdul[0].data before writing # back out--this is the case that failed in # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 hdul = fits.open(self.temp('fixed-1890-z.fits')) with ignore_warnings(): hdul.writeto(self.temp('test_new.fits'), overwrite=True) hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[1].data == orig_data).all() hdul.close() # Test opening/closing/reopening a scaled file in update mode hdul = fits.open(self.temp('fixed-1890-z.fits'), do_not_scale_image_data=True) hdul.writeto(self.temp('test_new.fits'), overwrite=True, output_verify='silentfix') hdul.close() hdul = fits.open(self.temp('test_new.fits')) orig_data = hdul[1].data hdul.close() hdul = fits.open(self.temp('test_new.fits'), mode='update') hdul.close() hdul = fits.open(self.temp('test_new.fits')) assert (hdul[1].data == orig_data).all() hdul = fits.open(self.temp('test_new.fits')) hdul.close() def test_scale_back_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3 Identical to test_scale_back() but uses a compressed image. 
""" # Create a compressed version of the scaled image with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp('scale.fits')) with fits.open(self.temp('scale.fits'), mode='update', scale_back=True) as hdul: orig_bitpix = hdul[1].header['BITPIX'] orig_bzero = hdul[1].header['BZERO'] orig_bscale = hdul[1].header['BSCALE'] orig_data = hdul[1].data.copy() hdul[1].data[0] = 0 with fits.open(self.temp('scale.fits'), do_not_scale_image_data=True) as hdul: assert hdul[1].header['BITPIX'] == orig_bitpix assert hdul[1].header['BZERO'] == orig_bzero assert hdul[1].header['BSCALE'] == orig_bscale zero_point = int(math.floor(-orig_bzero / orig_bscale)) assert (hdul[1].data[0] == zero_point).all() with fits.open(self.temp('scale.fits')) as hdul: assert (hdul[1].data[1:] == orig_data[1:]).all() # Extra test to ensure that after everything the data is still the # same as in the original uncompressed version of the image with fits.open(self.data('scale.fits')) as hdul2: # Recall we made the same modification to the data in hdul # above hdul2[0].data[0] = 0 assert (hdul[1].data == hdul2[0].data).all() def test_lossless_gzip_compression(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198""" noise = np.random.normal(size=(1000, 1000)) chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1') # First make a test image with lossy compression and make sure it # wasn't compressed perfectly. This shouldn't happen ever, but just to # make sure the test non-trivial. chdu1.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as h: assert np.abs(noise - h[1].data).max() > 0.0 del h chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1', quantize_level=0.0) # No quantization with ignore_warnings(): chdu2.writeto(self.temp('test.fits'), overwrite=True) with fits.open(self.temp('test.fits')) as h: assert (noise == h[1].data).all() def test_compression_column_tforms(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199""" # Some interestingly tiled data so that some of it is quantized and # some of it ends up just getting gzip-compressed data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] + np.arange(1, 7)) np.random.seed(1337) data1 = np.random.uniform(size=(6 * 4, 7 * 4)) data1[:data2.shape[0], :data2.shape[1]] = data2 chdu = fits.CompImageHDU(data1, compression_type='RICE_1', tile_size=(6, 7)) chdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), disable_image_compression=True) as h: assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1']) assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2']) def test_compression_update_header(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/23 """ self.copy_file('comp.fits') with fits.open(self.temp('comp.fits'), mode='update') as hdul: assert isinstance(hdul[1], fits.CompImageHDU) hdul[1].header['test1'] = 'test' hdul[1]._header['test2'] = 'test2' with fits.open(self.temp('comp.fits')) as hdul: assert 'test1' in hdul[1].header assert hdul[1].header['test1'] == 'test' assert 'test2' in hdul[1].header assert hdul[1].header['test2'] == 'test2' # Test update via index now: with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdr = hdul[1].header hdr[hdr.index('TEST1')] = 'foo' with fits.open(self.temp('comp.fits')) as hdul: assert hdul[1].header['TEST1'] == 'foo' # Test slice updates with 
fits.open(self.temp('comp.fits'), mode='update') as hdul: hdul[1].header['TEST*'] = 'qux' with fits.open(self.temp('comp.fits')) as hdul: assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux'] with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdr = hdul[1].header idx = hdr.index('TEST1') hdr[idx:idx + 2] = 'bar' with fits.open(self.temp('comp.fits')) as hdul: assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar'] # Test updating a specific COMMENT card duplicate with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!' with fits.open(self.temp('comp.fits')) as hdul: assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!' assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!' # Test deleting by keyword and by slice with fits.open(self.temp('comp.fits'), mode='update') as hdul: hdr = hdul[1].header del hdr['COMMENT'] idx = hdr.index('TEST1') del hdr[idx:idx + 2] with fits.open(self.temp('comp.fits')) as hdul: assert 'COMMENT' not in hdul[1].header assert 'COMMENT' not in hdul[1]._header assert 'TEST1' not in hdul[1].header assert 'TEST1' not in hdul[1]._header assert 'TEST2' not in hdul[1].header assert 'TEST2' not in hdul[1]._header def test_compression_update_header_with_reserved(self): """ Ensure that setting reserved keywords related to the table data structure on CompImageHDU image headers fails. """ def test_set_keyword(hdr, keyword, value): with catch_warnings() as w: hdr[keyword] = value assert len(w) == 1 assert str(w[0].message).startswith( "Keyword {!r} is reserved".format(keyword)) assert keyword not in hdr with fits.open(self.data('comp.fits')) as hdul: hdr = hdul[1].header test_set_keyword(hdr, 'TFIELDS', 8) test_set_keyword(hdr, 'TTYPE1', 'Foo') test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF') test_set_keyword(hdr, 'ZVAL1', 'Foo') def test_compression_header_append(self): with fits.open(self.data('comp.fits')) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header with catch_warnings() as w: imghdr.append('TFIELDS') assert len(w) == 1 assert 'TFIELDS' not in imghdr imghdr.append(('FOO', 'bar', 'qux'), end=True) assert 'FOO' in imghdr assert imghdr[-1] == 'bar' assert 'FOO' in tblhdr assert tblhdr[-1] == 'bar' imghdr.append(('CHECKSUM', 'abcd1234')) assert 'CHECKSUM' in imghdr assert imghdr['CHECKSUM'] == 'abcd1234' assert 'CHECKSUM' not in tblhdr assert 'ZHECKSUM' in tblhdr assert tblhdr['ZHECKSUM'] == 'abcd1234' def test_compression_header_append2(self): """ Regresion test for issue https://github.com/astropy/astropy/issues/5827 """ with fits.open(self.data('comp.fits')) as hdul: header = hdul[1].header while (len(header) < 1000): header.append() # pad with grow room # Append stats to header: header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean")) header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev")) header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median")) def test_compression_header_insert(self): with fits.open(self.data('comp.fits')) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header # First try inserting a restricted keyword with catch_warnings() as w: imghdr.insert(1000, 'TFIELDS') assert len(w) == 1 assert 'TFIELDS' not in imghdr assert tblhdr.count('TFIELDS') == 1 # First try keyword-relative insert imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait')) assert 'OBSERVER' in imghdr assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1 assert 'OBSERVER' in tblhdr assert tblhdr.index('OBSERVER') == 
tblhdr.index('TELESCOP') - 1 # Next let's see if an index-relative insert winds up being # sensible idx = imghdr.index('OBSERVER') imghdr.insert('OBSERVER', ('FOO',)) assert 'FOO' in imghdr assert imghdr.index('FOO') == idx assert 'FOO' in tblhdr assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1 def test_compression_header_set_before_after(self): with fits.open(self.data('comp.fits')) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header with catch_warnings() as w: imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION') assert len(w) == 1 assert 'ZBITPIX' not in imghdr assert tblhdr.count('ZBITPIX') == 1 assert tblhdr['ZBITPIX'] != 77 # Move GCOUNT before PCOUNT (not that there's any reason you'd # *want* to do that, but it's just a test...) imghdr.set('GCOUNT', 99, before='PCOUNT') assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1 assert imghdr['GCOUNT'] == 99 assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1 assert tblhdr['ZGCOUNT'] == 99 assert tblhdr.index('PCOUNT') == 5 assert tblhdr.index('GCOUNT') == 6 assert tblhdr['GCOUNT'] == 1 imghdr.set('GCOUNT', 2, after='PCOUNT') assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1 assert imghdr['GCOUNT'] == 2 assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1 assert tblhdr['ZGCOUNT'] == 2 assert tblhdr.index('PCOUNT') == 5 assert tblhdr.index('GCOUNT') == 6 assert tblhdr['GCOUNT'] == 1 def test_compression_header_append_commentary(self): """ Regression test for https://github.com/astropy/astropy/issues/2363 """ hdu = fits.CompImageHDU(np.array([0], dtype=np.int32)) hdu.header['COMMENT'] = 'hello world' assert hdu.header['COMMENT'] == ['hello world'] hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert hdul[1].header['COMMENT'] == ['hello world'] def test_compression_with_gzip_column(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/71 """ arr = np.zeros((2, 7000), dtype='float32') # The first row (which will be the first compressed tile) has a very # wide range of values that will be difficult to quantize, and should # result in use of a GZIP_COMPRESSED_DATA column arr[0] = np.linspace(0, 1, 7000) arr[1] = np.random.normal(size=7000) hdu = fits.CompImageHDU(data=arr) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: comp_hdu = hdul[1] # GZIP-compressed tile should compare exactly assert np.all(comp_hdu.data[0] == arr[0]) # The second tile uses lossy compression and may be somewhat off, # so we don't bother comparing it exactly def test_duplicate_compression_header_keywords(self): """ Regression test for https://github.com/astropy/astropy/issues/2750 Tests that the fake header (for the compressed image) can still be read even if the real header contained a duplicate ZTENSION keyword (the issue applies to any keyword specific to the compression convention, however). 
""" arr = np.arange(100, dtype=np.int32) hdu = fits.CompImageHDU(data=arr) header = hdu._header # append the duplicate keyword hdu._header.append(('ZTENSION', 'IMAGE')) hdu.writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits')) as hdul: assert header == hdul[1]._header # There's no good reason to have a duplicate keyword, but # technically it isn't invalid either :/ assert hdul[1]._header.count('ZTENSION') == 2 def test_scale_bzero_with_compressed_int_data(self): """ Regression test for https://github.com/astropy/astropy/issues/4600 and https://github.com/astropy/astropy/issues/4588 Identical to test_scale_bzero_with_int_data() but uses a compressed image. """ a = np.arange(100, 200, dtype=np.int16) hdu1 = fits.CompImageHDU(data=a.copy()) hdu2 = fits.CompImageHDU(data=a.copy()) # Previously the following line would throw a TypeError, # now it should be identical to the integer bzero case hdu1.scale('int16', bzero=99.0) hdu2.scale('int16', bzero=99) assert np.allclose(hdu1.data, hdu2.data) def test_scale_back_compressed_uint_assignment(self): """ Extend fix for #4600 to assignment to data Identical to test_scale_back_uint_assignment() but uses a compressed image. Suggested by: https://github.com/astropy/astropy/pull/4602#issuecomment-208713748 """ a = np.arange(100, 200, dtype=np.uint16) fits.CompImageHDU(a).writeto(self.temp('test.fits')) with fits.open(self.temp('test.fits'), mode="update", scale_back=True) as hdul: hdul[1].data[:] = 0 assert np.allclose(hdul[1].data, 0) def test_compressed_header_missing_znaxis(self): a = np.arange(100, 200, dtype=np.uint16) comp_hdu = fits.CompImageHDU(a) comp_hdu._header.pop('ZNAXIS') with pytest.raises(KeyError): comp_hdu.compressed_data comp_hdu = fits.CompImageHDU(a) comp_hdu._header.pop('ZBITPIX') with pytest.raises(KeyError): comp_hdu.compressed_data @pytest.mark.parametrize( ('keyword', 'dtype', 'expected'), [('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32), ('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32), ('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)]) def test_compressed_scaled_float(self, keyword, dtype, expected): """ If BSCALE,BZERO is set to floating point values, the image should be floating-point. https://github.com/astropy/astropy/pull/6492 Parameters ---------- keyword : `str` Keyword to set to a floating-point value to trigger floating-point pixels. dtype : `numpy.dtype` Type of original array. expected : `numpy.dtype` Expected type of uncompressed array. """ value = 1.23345 # A floating-point value hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype)) hdu.header[keyword] = value hdu.writeto(self.temp('test.fits')) del hdu with fits.open(self.temp('test.fits')) as hdu: assert hdu[1].header[keyword] == value assert hdu[1].data.dtype == expected def test_comphdu_bscale(tmpdir): """ Regression test for a bug that caused extensions that used BZERO and BSCALE that got turned into CompImageHDU to end up with BZERO/BSCALE before the TFIELDS. """ filename1 = tmpdir.join('3hdus.fits').strpath filename2 = tmpdir.join('3hdus_comp.fits').strpath x = np.random.random((100, 100))*100 x0 = fits.PrimaryHDU() x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True) x1.header['BZERO'] = 20331 x1.header['BSCALE'] = 2.3 hdus = fits.HDUList([x0, x1]) hdus.writeto(filename1) # fitsverify (based on cfitsio) should fail on this file, only seeing the # first HDU. 
hdus = fits.open(filename1) hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32), header=hdus[1].header) hdus.writeto(filename2) # open again and verify hdus = fits.open(filename2) hdus[1].verify('exception') def test_scale_implicit_casting(): # Regression test for an issue that occurred because Numpy now does not # allow implicit type casting during inplace operations. hdu = fits.ImageHDU(np.array([1], dtype=np.int32)) hdu.scale(bzero=1.3) def test_bzero_implicit_casting_compressed(): # Regression test for an issue that occurred because Numpy now does not # allow implicit type casting during inplace operations. Astropy is # actually not able to produce a file that triggers the failure - the # issue occurs when using unsigned integer types in the FITS file, in which # case BZERO should be 32768. But if the keyword is stored as 32768.0, then # it was possible to trigger the implicit casting error. filename = os.path.join(os.path.dirname(__file__), 'data', 'compressed_float_bzero.fits') hdu = fits.open(filename)[1] hdu.data def test_bzero_mishandled_info(tmpdir): # Regression test for #5507: # Calling HDUList.info() on a dataset which applies a zeropoint # from BZERO but which astropy.io.fits does not think it needs # to resize to a new dtype results in an AttributeError. filename = tmpdir.join('floatimg_with_bzero.fits').strpath hdu = fits.ImageHDU(np.zeros((10, 10))) hdu.header['BZERO'] = 10 hdu.writeto(filename, overwrite=True) hdul = fits.open(filename) hdul.info() def test_image_write_readonly(tmpdir): # Regression test to make sure that we can write out read-only arrays (#5512) x = np.array([1, 2, 3]) x.setflags(write=False) ghdu = fits.ImageHDU(data=x) ghdu.add_datasum() filename = tmpdir.join('test.fits').strpath ghdu.writeto(filename) with fits.open(filename) as hdulist: assert_equal(hdulist[1].data, [1, 2, 3]) # Same for compressed HDU x = np.array([1.0, 2.0, 3.0]) x.setflags(write=False) ghdu = fits.CompImageHDU(data=x) # add_datasum does not work for CompImageHDU # ghdu.add_datasum() filename = tmpdir.join('test2.fits').strpath ghdu.writeto(filename) with fits.open(filename) as hdulist: assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
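# --- Illustrative sketch (not part of the original test module above). A
# minimal round-trip of a scaled CompImageHDU, mirroring the BZERO/BSCALE
# behaviour exercised by tests such as
# test_scale_bzero_with_compressed_int_data. The output path is hypothetical.
import numpy as np
from astropy.io import fits


def _compimage_scaling_sketch(path='scaled_comp.fits'):
    data = np.arange(100, 200, dtype=np.int16)
    hdu = fits.CompImageHDU(data=data)
    # Integer and float bzero values are expected to behave identically.
    hdu.scale('int16', bzero=99)
    hdu.writeto(path, overwrite=True)
    with fits.open(path) as hdul:
        # The compressed image lives in extension 1; it is rescaled on read.
        return hdul[1].data.copy()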
11163864cb3fb5eff0b22398e556e429e86cf7a5284b16fb047bc8e445f5074d
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import functools from io import BytesIO from textwrap import dedent import pytest import numpy as np from numpy import ma from ....table import Table, MaskedColumn from ... import ascii from ...ascii.core import ParameterError, FastOptionsError from ...ascii.cparser import CParserError from ..fastbasic import ( FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader) from .common import assert_equal, assert_almost_equal, assert_true StringIO = lambda x: BytesIO(x.encode('ascii')) TRAVIS = os.environ.get('TRAVIS', False) def assert_table_equal(t1, t2, check_meta=False): assert_equal(len(t1), len(t2)) assert_equal(t1.colnames, t2.colnames) if check_meta: assert_equal(t1.meta, t2.meta) for name in t1.colnames: if len(t1) != 0: assert_equal(t1[name].dtype.kind, t2[name].dtype.kind) if not isinstance(t1[name], MaskedColumn): for i, el in enumerate(t1[name]): try: if not isinstance(el, str) and np.isnan(el): assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i])) elif isinstance(el, str): assert_equal(el, t2[name][i]) else: assert_almost_equal(el, t2[name][i]) except (TypeError, NotImplementedError): pass # ignore for now # Use this counter to create a unique filename for each file created in a test # if this function is called more than once in a single test _filename_counter = 0 def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs): # make sure we have a newline so table can't be misinterpreted as a filename global _filename_counter table += '\n' reader = Reader(**kwargs) t1 = reader.read(table) t2 = reader.read(StringIO(table)) t3 = reader.read(table.splitlines()) t4 = ascii.read(table, format=format, guess=False, **kwargs) t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs) assert_table_equal(t1, t2, check_meta=check_meta) assert_table_equal(t2, t3, check_meta=check_meta) assert_table_equal(t3, t4, check_meta=check_meta) assert_table_equal(t4, t5, check_meta=check_meta) if parallel: if TRAVIS: pytest.xfail("Multiprocessing can sometimes fail on Travis CI") elif os.name == 'nt': pytest.xfail("Multiprocessing is currently unsupported on Windows") t6 = ascii.read(table, format=format, guess=False, fast_reader={ 'parallel': True}, **kwargs) assert_table_equal(t1, t6, check_meta=check_meta) filename = str(tmpdir.join('table{0}.txt'.format(_filename_counter))) _filename_counter += 1 with open(filename, 'wb') as f: f.write(table.encode('ascii')) f.flush() t7 = ascii.read(filename, format=format, guess=False, **kwargs) if parallel: t8 = ascii.read(filename, format=format, guess=False, fast_reader={ 'parallel': True}, **kwargs) assert_table_equal(t1, t7, check_meta=check_meta) if parallel: assert_table_equal(t1, t8, check_meta=check_meta) return t1 @pytest.fixture(scope='function') def read_basic(tmpdir, request): return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic') @pytest.fixture(scope='function') def read_csv(tmpdir, request): return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv') @pytest.fixture(scope='function') def read_tab(tmpdir, request): return functools.partial(_read, tmpdir, Reader=FastTab, format='tab') @pytest.fixture(scope='function') def read_commented_header(tmpdir, request): return functools.partial(_read, tmpdir, Reader=FastCommentedHeader, format='commented_header') @pytest.fixture(scope='function') def read_rdb(tmpdir, request): return functools.partial(_read, tmpdir, 
Reader=FastRdb, format='rdb') @pytest.fixture(scope='function') def read_no_header(tmpdir, request): return functools.partial(_read, tmpdir, Reader=FastNoHeader, format='no_header') @pytest.mark.parametrize("parallel", [True, False]) def test_simple_data(parallel, read_basic): """ Make sure the fast reader works with basic input data. """ table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) assert_table_equal(table, expected) def test_read_types(): """ Make sure that the read() function takes filenames, strings, and lists of strings in addition to file-like objects. """ t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False) # TODO: also read from file t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False) t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False) assert_table_equal(t1, t2) assert_table_equal(t2, t3) @pytest.mark.parametrize("parallel", [True, False]) def test_supplied_names(parallel, read_basic): """ If passed as a parameter, names should replace any column names found in the header. """ table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_no_header(parallel, read_basic, read_no_header): """ The header should not be read when header_start=None. Unless names is passed, the column names should be auto-generated. """ # Cannot set header_start=None for basic format with pytest.raises(ValueError): read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel) t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel) expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3')) assert_table_equal(t2, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_no_header_supplied_names(parallel, read_basic, read_no_header): """ If header_start=None and names is passed as a parameter, header data should not be read and names should be used instead. """ table = read_no_header("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel) expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_comment(parallel, read_basic): """ Make sure that line comments are ignored by the C reader. """ table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_empty_lines(parallel, read_basic): """ Make sure that empty lines are ignored by the C reader. """ table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_lstrip_whitespace(parallel, read_basic): """ Test to make sure the reader ignores whitespace at the beginning of fields. 
""" text = """ 1, 2, \t3 A,\t\t B, C a, b, c """ + ' \n' table = read_basic(text, delimiter=',', parallel=parallel) expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_rstrip_whitespace(parallel, read_basic): """ Test to make sure the reader ignores whitespace at the end of fields. """ text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n' table = read_basic(text, delimiter=',', parallel=parallel) expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_conversion(parallel, read_basic): """ The reader should try to convert each column to ints. If this fails, the reader should try to convert to floats. Failing this, it should fall back to strings. """ text = """ A B C D E 1 a 3 4 5 2. 1 9 10 -5.3e4 4 2 -12 .4 six """ table = read_basic(text, parallel=parallel) assert_equal(table['A'].dtype.kind, 'f') assert table['B'].dtype.kind in ('S', 'U') assert_equal(table['C'].dtype.kind, 'i') assert_equal(table['D'].dtype.kind, 'f') assert table['E'].dtype.kind in ('S', 'U') @pytest.mark.parametrize("parallel", [True, False]) def test_delimiter(parallel, read_basic): """ Make sure that different delimiters work as expected. """ text = """ COL1 COL2 COL3 1 A -1 2 B -2 """ expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3')) for sep in ' ,\t#;': table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_include_names(parallel, read_basic): """ If include_names is not None, the parser should read only those columns in include_names. """ table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel) expected = Table([[1, 5], [4, 8]], names=('A', 'D')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_exclude_names(parallel, read_basic): """ If exclude_names is not None, the parser should exclude the columns in exclude_names. """ table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel) expected = Table([[2, 6], [3, 7]], names=('B', 'C')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_include_exclude_names(parallel, read_basic): """ Make sure that include_names is applied before exclude_names if both are specified. """ text = """ A B C D E F G H 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 """ table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'], exclude_names=['B', 'F'], parallel=parallel) expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_quoted_fields(parallel, read_basic): """ The character quotechar (default '"') should denote the start of a field which can contain the field delimiter and newlines. 
""" if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = """ "A B" C D 1.5 2.1 -37.1 a b " c d" """ table = read_basic(text, parallel=parallel) expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D')) assert_table_equal(table, expected) table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel) assert_table_equal(table, expected) @pytest.mark.parametrize("key,val", [ ('delimiter', ',,'), # multi-char delimiter ('comment', '##'), # multi-char comment ('data_start', None), # data_start=None ('data_start', -1), # data_start negative ('quotechar', '##'), # multi-char quote signifier ('header_start', -1), # negative header_start ('converters', dict((i + 1, ascii.convert_numpy(np.uint)) for i in range(3))), # passing converters ('Inputter', ascii.ContinuationLinesInputter), # passing Inputter ('header_Splitter', ascii.DefaultSplitter), # passing Splitter ('data_Splitter', ascii.DefaultSplitter)]) def test_invalid_parameters(key, val): """ Make sure the C reader raises an error if passed parameters it can't handle. """ with pytest.raises(ParameterError): FastBasic(**{key: val}).read('1 2 3\n4 5 6') with pytest.raises(ParameterError): ascii.read('1 2 3\n4 5 6', format='fast_basic', guess=False, **{key: val}) def test_invalid_parameters_other(): with pytest.raises(TypeError): FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument with pytest.raises(FastOptionsError): # don't fall back on the slow reader ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7}) with pytest.raises(ParameterError): # Outputter cannot be specified in constructor FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6') def test_too_many_cols1(): """ If a row contains too many columns, the C reader should raise an error. """ text = """ A B C 1 2 3 4 5 6 7 8 9 10 11 12 13 """ with pytest.raises(CParserError) as e: table = FastBasic().read(text) assert 'CParserError: an error occurred while parsing table data: too many ' \ 'columns found in line 3 of data' in str(e) def test_too_many_cols2(): text = """\ aaa,bbb 1,2, 3,4, """ with pytest.raises(CParserError) as e: table = FastCsv().read(text) assert 'CParserError: an error occurred while parsing table data: too many ' \ 'columns found in line 1 of data' in str(e) def test_too_many_cols3(): text = """\ aaa,bbb 1,2,, 3,4, """ with pytest.raises(CParserError) as e: table = FastCsv().read(text) assert 'CParserError: an error occurred while parsing table data: too many ' \ 'columns found in line 1 of data' in str(e) @pytest.mark.parametrize("parallel", [True, False]) def test_not_enough_cols(parallel, read_csv): """ If a row does not have enough columns, the FastCsv reader should add empty fields while the FastBasic reader should raise an error. """ text = """ A,B,C 1,2,3 4,5 6,7,8 """ table = read_csv(text, parallel=parallel) assert table['B'][1] is not ma.masked assert table['C'][1] is ma.masked with pytest.raises(CParserError) as e: table = FastBasic(delimiter=',').read(text) @pytest.mark.parametrize("parallel", [True, False]) def test_data_end(parallel, read_basic, read_rdb): """ The parameter data_end should specify where data reading ends. 
""" text = """ A B C 1 2 3 4 5 6 7 8 9 10 11 12 """ table = read_basic(text, data_end=3, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) assert_table_equal(table, expected) # data_end supports negative indexing table = read_basic(text, data_end=-2, parallel=parallel) assert_table_equal(table, expected) text = """ A\tB\tC N\tN\tS 1\t2\ta 3\t4\tb 5\t6\tc """ # make sure data_end works with RDB table = read_rdb(text, data_end=-1, parallel=parallel) expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C')) assert_table_equal(table, expected) # positive index table = read_rdb(text, data_end=3, parallel=parallel) expected = Table([[1], [2], ['a']], names=('A', 'B', 'C')) assert_table_equal(table, expected) # empty table if data_end is too small table = read_rdb(text, data_end=1, parallel=parallel) expected = Table([[], [], []], names=('A', 'B', 'C')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_inf_nan(parallel, read_basic): """ Test that inf and nan-like values are correctly parsed on all platforms. Regression test for https://github.com/astropy/astropy/pull/3525 """ text = dedent("""\ A nan +nan -nan inf infinity +inf +infinity -inf -infinity """) expected = Table({'A': [np.nan, np.nan, np.nan, np.inf, np.inf, np.inf, np.inf, -np.inf, -np.inf]}) table = read_basic(text, parallel=parallel) assert table['A'].dtype.kind == 'f' assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_fill_values(parallel, read_basic): """ Make sure that the parameter fill_values works as intended. If fill_values is not specified, the default behavior should be to convert '' to 0. """ text = """ A, B, C , 2, nan a, -999, -3.4 nan, 5, -9999 8, nan, 7.6e12 """ table = read_basic(text, delimiter=',', parallel=parallel) # The empty value in row A should become a masked '0' assert isinstance(table['A'], MaskedColumn) assert table['A'][0] is ma.masked # '0' rather than 0 because there is a string in the column assert_equal(table['A'].data.data[0], '0') assert table['A'][1] is not ma.masked table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel) assert isinstance(table['B'], MaskedColumn) assert table['A'][0] is not ma.masked # empty value unaffected assert table['C'][2] is not ma.masked # -9999 is not an exact match assert table['B'][1] is ma.masked # Numeric because the rest of the column contains numeric data assert_equal(table['B'].data.data[1], 0.0) assert table['B'][0] is not ma.masked table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel) # None of the columns should be masked for name in 'ABC': assert not isinstance(table[name], MaskedColumn) table = read_basic(text, delimiter=',', fill_values=[('', '0', 'A'), ('nan', '999', 'A', 'C')], parallel=parallel) assert np.isnan(table['B'][3]) # nan filling skips column B assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan assert table['A'][0] is ma.masked assert table['A'][2] is ma.masked assert_equal(table['A'].data.data[0], '0') assert_equal(table['A'].data.data[2], '999') assert table['C'][0] is ma.masked assert_almost_equal(table['C'].data.data[0], 999.0) assert_almost_equal(table['C'][1], -3.4) # column is still of type float @pytest.mark.parametrize("parallel", [True, False]) def test_fill_include_exclude_names(parallel, read_csv): """ fill_include_names and fill_exclude_names should filter missing/empty value handling in the same way that 
include_names and exclude_names filter output columns. """ text = """ A, B, C , 1, 2 3, , 4 5, 5, """ table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel) assert table['A'][0] is ma.masked assert table['B'][1] is ma.masked assert table['C'][2] is not ma.masked # C not in fill_include_names table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel) assert table['C'][2] is ma.masked assert table['A'][0] is not ma.masked assert table['B'][1] is not ma.masked # A and B excluded from fill handling table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel) assert table['A'][0] is ma.masked assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names assert table['C'][2] is not ma.masked @pytest.mark.parametrize("parallel", [True, False]) def test_many_rows(parallel, read_basic): """ Make sure memory reallocation works okay when the number of rows is large (so that each column string is longer than INITIAL_COL_SIZE). """ text = 'A B C\n' for i in range(500): # create 500 rows text += ' '.join([str(i) for i in range(3)]) text += '\n' table = read_basic(text, parallel=parallel) expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_many_columns(parallel, read_basic): """ Make sure memory reallocation works okay when the number of columns is large (so that each header string is longer than INITIAL_HEADER_SIZE). """ # create a string with 500 columns and two data rows text = ' '.join([str(i) for i in range(500)]) text += ('\n' + text + '\n' + text) table = read_basic(text, parallel=parallel) expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)]) assert_table_equal(table, expected) def test_fast_reader(): """ Make sure that ascii.read() works as expected by default and with fast_reader specified. """ text = 'a b c\n1 2 3\n4 5 6' with pytest.raises(ParameterError): # C reader can't handle regex comment ascii.read(text, format='fast_basic', guess=False, comment='##') # Enable multiprocessing and the fast converter try: ascii.read(text, format='basic', guess=False, fast_reader={'parallel': True, 'use_fast_converter': True}) except NotImplementedError: # Might get this on Windows, try without parallel... if os.name == 'nt': ascii.read(text, format='basic', guess=False, fast_reader={'parallel': False, 'use_fast_converter': True}) else: raise # Should raise an error if fast_reader has an invalid key with pytest.raises(FastOptionsError): ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True}) # Use the slow reader instead ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False) # Will try the slow reader afterwards by default ascii.read(text, format='basic', guess=False, comment='##') @pytest.mark.parametrize("parallel", [True, False]) def test_read_tab(parallel, read_tab): """ The fast reader for tab-separated values should not strip whitespace, unlike the basic reader. 
""" if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t ' table = read_tab(text, parallel=parallel) assert_equal(table['1'][0], ' a') # preserve line whitespace assert_equal(table['2'][0], ' b ') # preserve field whitespace assert table['3'][0] is ma.masked # empty value should be masked assert_equal(table['2'][1], ' d e') # preserve whitespace in quoted fields assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace @pytest.mark.parametrize("parallel", [True, False]) def test_default_data_start(parallel, read_basic): """ If data_start is not explicitly passed to read(), data processing should beginning right after the header. """ text = 'ignore this line\na b c\n1 2 3\n4 5 6' table = read_basic(text, header_start=1, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_commented_header(parallel, read_commented_header): """ The FastCommentedHeader reader should mimic the behavior of the CommentedHeader by overriding the default header behavior of FastBasic. """ text = """ # A B C 1 2 3 4 5 6 """ t1 = read_commented_header(text, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) assert_table_equal(t1, expected) text = '# first commented line\n # second commented line\n\n' + text t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel) assert_table_equal(t2, expected) t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) # negative indexing allowed assert_table_equal(t3, expected) text += '7 8 9' t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel) expected = Table([[7], [8], [9]], names=('A', 'B', 'C')) assert_table_equal(t4, expected) with pytest.raises(ParameterError): read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) # data_start cannot be negative @pytest.mark.parametrize("parallel", [True, False]) def test_rdb(parallel, read_rdb): """ Make sure the FastRdb reader works as expected. """ text = """ A\tB\tC 1n\tS\t4N 1\t 9\t4.3 """ table = read_rdb(text, parallel=parallel) expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C')) assert_table_equal(table, expected) assert_equal(table['A'].dtype.kind, 'i') assert table['B'].dtype.kind in ('S', 'U') assert_equal(table['C'].dtype.kind, 'f') with pytest.raises(ValueError) as e: text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data read_rdb(text, parallel=parallel) assert 'Column C failed to convert' in str(e) with pytest.raises(ValueError) as e: text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified read_rdb(text, parallel=parallel) assert 'mismatch between number of column names and column types' in str(e) with pytest.raises(ValueError) as e: text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C read_rdb(text, parallel=parallel) assert 'type definitions do not all match [num](N|S)' in str(e) @pytest.mark.parametrize("parallel", [True, False]) def test_data_start(parallel, read_basic): """ Make sure that data parsing begins at data_start (ignoring empty and commented lines but not taking quoted values into account). 
""" if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = """ A B C 1 2 3 4 5 6 7 8 "9 \t1" # comment 10 11 12 """ table = read_basic(text, data_start=2, parallel=parallel) expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C')) assert_table_equal(table, expected) table = read_basic(text, data_start=3, parallel=parallel) # ignore empty line expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C')) assert_table_equal(table, expected) with pytest.raises(CParserError) as e: # tries to begin in the middle of quoted field read_basic(text, data_start=4, parallel=parallel) assert 'not enough columns found in line 1 of data' in str(e) table = read_basic(text, data_start=5, parallel=parallel) # ignore commented line expected = Table([[10], [11], [12]], names=('A', 'B', 'C')) assert_table_equal(table, expected) text = """ A B C 1 2 3 4 5 6 7 8 9 # comment 10 11 12 """ # make sure reading works as expected in parallel table = read_basic(text, data_start=2, parallel=parallel) expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_quoted_empty_values(parallel, read_basic): """ Quoted empty values spanning multiple lines should be treated correctly. """ if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = 'a b c\n1 2 " \n "' table = read_basic(text, parallel=parallel) assert table['c'][0] is ma.masked # empty value masked by default @pytest.mark.parametrize("parallel", [True, False]) def test_csv_comment_default(parallel, read_csv): """ Unless the comment parameter is specified, the CSV reader should not treat any lines as comments. """ text = 'a,b,c\n#1,2,3\n4,5,6' table = read_csv(text, parallel=parallel) expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_whitespace_before_comment(parallel, read_tab): """ Readers that don't strip whitespace from data (Tab, RDB) should still treat lines with leading whitespace and then the comment char as comment lines. """ text = 'a\tb\tc\n # comment line\n1\t2\t3' table = read_tab(text, parallel=parallel) expected = Table([[1], [2], [3]], names=('a', 'b', 'c')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_strip_line_trailing_whitespace(parallel, read_basic): """ Readers that strip whitespace from lines should ignore trailing whitespace after the last data value of each row. """ text = 'a b c\n1 2 \n3 4 5' with pytest.raises(CParserError) as e: ascii.read(StringIO(text), format='fast_basic', guess=False) assert 'not enough columns found in line 1' in str(e) text = 'a b c\n 1 2 3 \t \n 4 5 6 ' table = read_basic(text, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_no_data(parallel, read_basic): """ As long as column names are supplied, the C reader should return an empty table in the absence of data. 
""" table = read_basic('a b c', parallel=parallel) expected = Table([[], [], []], names=('a', 'b', 'c')) assert_table_equal(table, expected) table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_line_endings(parallel, read_basic, read_commented_header, read_rdb): """ Make sure the fast reader accepts CR and CR+LF as newlines. """ text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n' expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c')) for newline in ('\r\n', '\r'): table = read_basic(text.replace('\n', newline), parallel=parallel) assert_table_equal(table, expected) # Make sure the splitlines() method of FileString # works with CR/CR+LF line endings text = '#' + text for newline in ('\r\n', '\r'): table = read_commented_header(text.replace('\n', newline), parallel=parallel) assert_table_equal(table, expected) expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'), masked=True) expected['a'][0] = np.ma.masked expected['c'][0] = np.ma.masked text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n' for newline in ('\r\n', '\r'): table = read_rdb(text.replace('\n', newline), parallel=parallel) assert_table_equal(table, expected) assert np.all(table == expected) @pytest.mark.parametrize("parallel", [True, False]) def test_store_comments(parallel, read_basic): """ Make sure that the output Table produced by the fast reader stores any comment lines in its meta attribute. """ text = """ # header comment a b c # comment 2 # comment 3 1 2 3 4 5 6 """ table = read_basic(text, parallel=parallel, check_meta=True) assert_equal(table.meta['comments'], ['header comment', 'comment 2', 'comment 3']) @pytest.mark.parametrize("parallel", [True, False]) def test_empty_quotes(parallel, read_basic): """ Make sure the C reader doesn't segfault when the input data contains empty quotes. [#3407] """ table = read_basic('a b\n1 ""\n2 ""', parallel=parallel) expected = Table([[1, 2], [0, 0]], names=('a', 'b')) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_fast_tab_with_names(parallel, read_tab): """ Make sure the C reader doesn't segfault when the header for the first column is missing [#3545] """ content = """# \tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot -3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" head = ['A{0}'.format(i) for i in range(28)] table = read_tab(content, data_start=1, parallel=parallel, names=head) @pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'), reason='Environment variable TEST_READ_HUGE_FILE must be ' 'defined to run this test') def test_read_big_table(tmpdir): """Test reading of a huge file. This test generates a huge CSV file (~2.3Gb) before reading it (see https://github.com/astropy/astropy/pull/5319). The test is run only if the environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running the test requires quite a lot of memory (~18Gb when reading the file) !! 
""" NB_ROWS = 250000 NB_COLS = 500 filename = str(tmpdir.join("big_table.csv")) print("Creating a {} rows table ({} columns).".format(NB_ROWS, NB_COLS)) data = np.random.random(NB_ROWS) t = Table(data=[data]*NB_COLS, names=[str(i) for i in range(NB_COLS)]) data = None print("Saving the table to {}".format(filename)) t.write(filename, format='ascii.csv', overwrite=True) t = None print("Counting the number of lines in the csv, it should be {}" " + 1 (header).".format(NB_ROWS)) assert sum(1 for line in open(filename)) == NB_ROWS + 1 print("Reading the file with astropy.") t = Table.read(filename, format='ascii.csv', fast_reader=True) assert len(t) == NB_ROWS # fast_reader configurations: False| 'use_fast_converter'=False|True @pytest.mark.parametrize('reader', [0, 1, 2]) # catch Windows environment since we cannot use _read() with custom fast_reader @pytest.mark.parametrize("parallel", [False, pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows"))]) def test_data_out_of_range(parallel, reader): """ Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|) shall be returned as 0 and +-inf respectively by the C parser, just like the Python parser. Test fast converter only to nominal accuracy. """ # Python reader and strtod() are expected to return precise results rtol = 1.e-30 if reader > 1: rtol = 1.e-15 # passing fast_reader dict with parametrize does not work! if reader > 0: fast_reader = {'parallel': parallel, 'use_fast_converter': reader > 1} else: fast_reader = False if parallel: if reader < 1: pytest.skip("Multiprocessing only available in fast reader") elif TRAVIS: pytest.xfail("Multiprocessing can sometimes fail on Travis CI") fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345'] values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf]) t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, fast_reader=fast_reader) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) # test some additional corner cases fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305', '0.4e-324', '2500e-327', ' 0.0000000000000000000001024E+330'] values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308]) t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, fast_reader=fast_reader) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) # test corner cases again with non-standard exponent_style (auto-detection) if reader < 2: pytest.skip("Fortran exponent style only available in fast converter") fast_reader.update({'exponent_style': 'A'}) fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305', '0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330'] t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, fast_reader=fast_reader) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324) # catch Windows environment since we cannot use _read() with custom fast_reader @pytest.mark.parametrize("parallel", [ pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows")), False]) def test_int_out_of_range(parallel): """ Integer numbers outside int range shall be returned as string columns consistent with the standard (Python) parser (no 
'upcasting' to float). """ imin = np.iinfo(int).min+1 imax = np.iinfo(int).max-1 huge = '{:d}'.format(imax+2) text = 'P M S\n {:d} {:d} {:s}'.format(imax, imin, huge) expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S')) table = ascii.read(text, format='basic', guess=False, fast_reader={'parallel': parallel}) assert_table_equal(table, expected) # check with leading zeroes to make sure strtol does not read them as octal text = 'P M S\n000{:d} -0{:d} 00{:s}'.format(imax, -imin, huge) expected = Table([[imax], [imin], ['00'+huge]], names=('P', 'M', 'S')) table = ascii.read(text, format='basic', guess=False, fast_reader={'parallel': parallel}) assert_table_equal(table, expected) # mixed columns should be returned as float, but if the out-of-range integer # shows up first, it will produce a string column - with both readers pytest.xfail("Integer fallback depends on order of rows") text = 'A B\n 12.3 {0:d}9\n {0:d}9 45.6e7'.format(imax) expected = Table([[12.3, 10.*imax], [10.*imax, 4.56e8]], names=('A', 'B')) table = ascii.read(text, format='basic', guess=False, fast_reader={'parallel': parallel}) assert_table_equal(table, expected) table = ascii.read(text, format='basic', guess=False, fast_reader=False) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [ pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows")), False]) def test_fortran_reader(parallel): """ Make sure that ascii.read() can read Fortran-style exponential notation using the fast_reader. """ text = 'A B C\n100.01{:s}+99 2.0 3\n 4.2{:s}-1 5.0{:s}-1 0.6{:s}4' expected = Table([[1.0001e101, 0.42], [2, 0.5], [3.0, 6000]], names=('A', 'B', 'C')) expstyles = {'e': 4*('E'), 'D': ('D', 'd', 'd', 'D'), 'Q': 2*('q', 'Q'), 'fortran': ('D', 'E', 'Q', 'd')} # C strtod (not-fast converter) can't handle Fortran exp with pytest.raises(FastOptionsError) as e: ascii.read(text.format(*(4*('D'))), format='basic', guess=False, fast_reader={'use_fast_converter': False, 'parallel': parallel, 'exponent_style': 'D'}) assert 'fast_reader: exponent_style requires use_fast_converter' in str(e) # enable multiprocessing and the fast converter # iterate over all style-exponent combinations for s, c in expstyles.items(): table = ascii.read(text.format(*c), format='basic', guess=False, fast_reader={'parallel': parallel, 'exponent_style': s}) assert_table_equal(table, expected) # mixes and triple-exponents without any character using autodetect option text = 'A B C\n1.0001+101 2.0E0 3\n.42d0 0.5 6.+003' table = ascii.read(text, format='basic', guess=False, fast_reader={'parallel': parallel, 'exponent_style': 'fortran'}) assert_table_equal(table, expected) # additional corner-case checks text = 'A B C\n1.0001+101 2.0+000 3\n0.42+000 0.5 6000.-000' table = ascii.read(text, format='basic', guess=False, fast_reader={'parallel': parallel, 'exponent_style': 'fortran'}) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [ pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows")), False]) def test_fortran_invalid_exp(parallel): """ Test Fortran-style exponential notation in the fast_reader with invalid exponent-like patterns (no triple-digits) to make sure they are returned as strings instead, as with the standard C parser. 
""" if parallel and TRAVIS: pytest.xfail("Multiprocessing can sometimes fail on Travis CI") fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.', '2', '4.56e-2.3', '8000', '4.2-122'] values = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, 4.2e-122] t = ascii.read(StringIO(' '.join(fields)), format='no_header', guess=False, fast_reader={'parallel': parallel, 'exponent_style': 'A'}) read_values = [col[0] for col in t.itercols()] assert read_values == values
855fc3cd24d3ff5093b9ae6d236ed34236c0706129fc35cebba6f5695fc057cc
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO import pytest from ... import ascii from ..core import InconsistentTableError from .common import (assert_equal, assert_almost_equal) def assert_equal_splitlines(arg1, arg2): assert_equal(arg1.splitlines(), arg2.splitlines()) def test_read_normal(): """Nice, typical fixed format table""" table = """ # comment (with blank line above) | Col1 | Col2 | | 1.2 | "hello" | | 2.4 |'s worlds| """ reader = ascii.get_reader(Reader=ascii.FixedWidth) dat = reader.read(table) assert_equal(dat.colnames, ['Col1', 'Col2']) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hello"') assert_equal(dat[1][1], "'s worlds") def test_read_normal_names(): """Nice, typical fixed format table with col names provided""" table = """ # comment (with blank line above) | Col1 | Col2 | | 1.2 | "hello" | | 2.4 |'s worlds| """ reader = ascii.get_reader(Reader=ascii.FixedWidth, names=('name1', 'name2')) dat = reader.read(table) assert_equal(dat.colnames, ['name1', 'name2']) assert_almost_equal(dat[1][0], 2.4) def test_read_normal_names_include(): """Nice, typical fixed format table with col names provided""" table = """ # comment (with blank line above) | Col1 | Col2 | Col3 | | 1.2 | "hello" | 3 | | 2.4 |'s worlds| 7 | """ reader = ascii.get_reader(Reader=ascii.FixedWidth, names=('name1', 'name2', 'name3'), include_names=('name1', 'name3')) dat = reader.read(table) assert_equal(dat.colnames, ['name1', 'name3']) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], 3) def test_read_normal_exclude(): """Nice, typical fixed format table with col name excluded""" table = """ # comment (with blank line above) | Col1 | Col2 | | 1.2 | "hello" | | 2.4 |'s worlds| """ reader = ascii.get_reader(Reader=ascii.FixedWidth, exclude_names=('Col1',)) dat = reader.read(table) assert_equal(dat.colnames, ['Col2']) assert_equal(dat[1][0], "'s worlds") def test_read_weird(): """Weird input table with data values chopped by col extent """ table = """ Col1 | Col2 | 1.2 "hello" 2.4 sdf's worlds """ reader = ascii.get_reader(Reader=ascii.FixedWidth) dat = reader.read(table) assert_equal(dat.colnames, ['Col1', 'Col2']) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hel') assert_equal(dat[1][1], "df's wo") def test_read_double(): """Table with double delimiters""" table = """ || Name || Phone || TCP|| | John | 555-1234 |192.168.1.10X| | Mary | 555-2134 |192.168.1.12X| | Bob | 555-4527 | 192.168.1.9X| """ dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False) assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) assert_equal(dat[1][0], "Mary") assert_equal(dat[0][1], "555-1234") assert_equal(dat[2][2], "192.168.1.9") def test_read_space_delimiter(): """Table with space delimiter""" table = """ Name --Phone- ----TCP----- John 555-1234 192.168.1.10 Mary 555-2134 192.168.1.12 Bob 555-4527 192.168.1.9 """ dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, delimiter=' ') assert_equal(tuple(dat.dtype.names), ('Name', '--Phone-', '----TCP-----')) assert_equal(dat[1][0], "Mary") assert_equal(dat[0][1], "555-1234") assert_equal(dat[2][2], "192.168.1.9") def test_read_no_header_autocolumn(): """Table with no header row and auto-column naming""" table = """ | John | 555-1234 |192.168.1.10| | Mary | 555-2134 |192.168.1.12| | Bob | 555-4527 | 192.168.1.9| """ dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, header_start=None, data_start=0) assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3')) 
assert_equal(dat[1][0], "Mary") assert_equal(dat[0][1], "555-1234") assert_equal(dat[2][2], "192.168.1.9") def test_read_no_header_names(): """Table with no header row and with col names provided. Second and third rows also have hanging spaces after final |.""" table = """ | John | 555-1234 |192.168.1.10| | Mary | 555-2134 |192.168.1.12| | Bob | 555-4527 | 192.168.1.9| """ dat = ascii.read(table, Reader=ascii.FixedWidth, guess=False, header_start=None, data_start=0, names=('Name', 'Phone', 'TCP')) assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) assert_equal(dat[1][0], "Mary") assert_equal(dat[0][1], "555-1234") assert_equal(dat[2][2], "192.168.1.9") def test_read_no_header_autocolumn_NoHeader(): """Table with no header row and auto-column naming""" table = """ | John | 555-1234 |192.168.1.10| | Mary | 555-2134 |192.168.1.12| | Bob | 555-4527 | 192.168.1.9| """ dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader) assert_equal(tuple(dat.dtype.names), ('col1', 'col2', 'col3')) assert_equal(dat[1][0], "Mary") assert_equal(dat[0][1], "555-1234") assert_equal(dat[2][2], "192.168.1.9") def test_read_no_header_names_NoHeader(): """Table with no header row and with col names provided. Second and third rows also have hanging spaces after final |.""" table = """ | John | 555-1234 |192.168.1.10| | Mary | 555-2134 |192.168.1.12| | Bob | 555-4527 | 192.168.1.9| """ dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, names=('Name', 'Phone', 'TCP')) assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) assert_equal(dat[1][0], "Mary") assert_equal(dat[0][1], "555-1234") assert_equal(dat[2][2], "192.168.1.9") def test_read_col_starts(): """Table with no delimiter with column start and end values specified.""" table = """ # 5 9 17 18 28 # | | || | John 555- 1234 192.168.1.10 Mary 555- 2134 192.168.1.12 Bob 555- 4527 192.168.1.9 """ dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, names=('Name', 'Phone', 'TCP'), col_starts=(0, 9, 18), col_ends=(5, 17, 28), ) assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) assert_equal(dat[0][1], "555- 1234") assert_equal(dat[1][0], "Mary") assert_equal(dat[1][2], "192.168.1.") assert_equal(dat[2][2], "192.168.1") # col_end=28 cuts this column off def test_read_detect_col_starts_or_ends(): """Table with no delimiter with only column start or end values specified""" table = """ #1 9 19 <== Column start indexes #| | | <== Column start positions #<------><--------><-------------> <== Inferred column positions John 555- 1234 192.168.1.10 Mary 555- 2134 192.168.1.123 Bob 555- 4527 192.168.1.9 Bill 555-9875 192.255.255.255 """ for kwargs in ({'col_starts': (1, 9, 19)}, {'col_ends': (8, 18, 33)}): dat = ascii.read(table, Reader=ascii.FixedWidthNoHeader, names=('Name', 'Phone', 'TCP'), **kwargs) assert_equal(tuple(dat.dtype.names), ('Name', 'Phone', 'TCP')) assert_equal(dat[0][1], "555- 1234") assert_equal(dat[1][0], "Mary") assert_equal(dat[1][2], "192.168.1.123") assert_equal(dat[3][2], "192.255.255.255") table = """\ | Col1 | Col2 | Col3 | Col4 | | 1.2 | "hello" | 1 | a | | 2.4 | 's worlds | 2 | 2 | """ dat = ascii.read(table, Reader=ascii.FixedWidth) def test_write_normal(): """Write a table as a normal fixed width table.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidth) assert_equal_splitlines(out.getvalue(), """\ | Col1 | Col2 | Col3 | Col4 | | 1.2 | "hello" | 1 | a | | 2.4 | 's worlds | 2 | 2 | """) def test_write_fill_values(): """Write a table as a normal fixed width table.""" out = StringIO() 
ascii.write(dat, out, Writer=ascii.FixedWidth, fill_values=('a', 'N/A')) assert_equal_splitlines(out.getvalue(), """\ | Col1 | Col2 | Col3 | Col4 | | 1.2 | "hello" | 1 | N/A | | 2.4 | 's worlds | 2 | 2 | """) def test_write_no_pad(): """Write a table as a fixed width table with no padding.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidth, delimiter_pad=None) assert_equal_splitlines(out.getvalue(), """\ |Col1| Col2|Col3|Col4| | 1.2| "hello"| 1| a| | 2.4|'s worlds| 2| 2| """) def test_write_no_bookend(): """Write a table as a fixed width table with no bookend.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False) assert_equal_splitlines(out.getvalue(), """\ Col1 | Col2 | Col3 | Col4 1.2 | "hello" | 1 | a 2.4 | 's worlds | 2 | 2 """) def test_write_no_delimiter(): """Write a table as a fixed width table with no delimiter.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidth, bookend=False, delimiter=None) assert_equal_splitlines(out.getvalue(), """\ Col1 Col2 Col3 Col4 1.2 "hello" 1 a 2.4 's worlds 2 2 """) def test_write_noheader_normal(): """Write a table as a normal fixed width table without a header row.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader) assert_equal_splitlines(out.getvalue(), """\ | 1.2 | "hello" | 1 | a | | 2.4 | 's worlds | 2 | 2 | """) def test_write_noheader_no_pad(): """Write a table as a fixed width table with no padding.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, delimiter_pad=None) assert_equal_splitlines(out.getvalue(), """\ |1.2| "hello"|1|a| |2.4|'s worlds|2|2| """) def test_write_noheader_no_bookend(): """Write a table as a fixed width table with no bookend.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, bookend=False) assert_equal_splitlines(out.getvalue(), """\ 1.2 | "hello" | 1 | a 2.4 | 's worlds | 2 | 2 """) def test_write_noheader_no_delimiter(): """Write a table as a fixed width table with no delimiter.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthNoHeader, bookend=False, delimiter=None) assert_equal_splitlines(out.getvalue(), """\ 1.2 "hello" 1 a 2.4 's worlds 2 2 """) def test_write_formats(): """Write a table as a fixed width table with custom column formats.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidth, formats={'Col1': '%-8.3f', 'Col2': '%-15s'}) assert_equal_splitlines(out.getvalue(), """\ | Col1 | Col2 | Col3 | Col4 | | 1.200 | "hello" | 1 | a | | 2.400 | 's worlds | 2 | 2 | """) def test_read_twoline_normal(): """Typical fixed format table with two header lines (with some cruft thrown in to test column positioning)""" table = """ Col1 Col2 ---- --------- 1.2xx"hello" 2.4 's worlds """ dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) assert_equal(dat.dtype.names, ('Col1', 'Col2')) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hello"') assert_equal(dat[1][1], "'s worlds") def test_read_twoline_ReST(): """Read a reStructuredText table""" table = """ ======= =========== Col1 Col2 ======= =========== 1.2 "hello" 2.4 's worlds ======= =========== """ dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, header_start=1, position_line=2, data_end=-1) assert_equal(dat.dtype.names, ('Col1', 'Col2')) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hello"') assert_equal(dat[1][1], "'s worlds") def test_read_twoline_human(): """Read text table designed for humans and test having position line before the header line""" table = """ +------+----------+ | Col1 | Col2 |
+------|----------+ | 1.2 | "hello" | | 2.4 | 's worlds| +------+----------+ """ dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, delimiter='+', header_start=1, position_line=0, data_start=3, data_end=-1) assert_equal(dat.dtype.names, ('Col1', 'Col2')) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hello"') assert_equal(dat[1][1], "'s worlds") def test_read_twoline_fail(): """Test failure if too many different characters are on the position line. The position line shall consist of only one character in addition to the delimiter. """ table = """ | Col1 | Col2 | |------|==========| | 1.2 | "hello" | | 2.4 | 's worlds| """ with pytest.raises(InconsistentTableError) as excinfo: dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, delimiter='|', guess=False) assert 'Position line should only contain delimiters and one other character' in str(excinfo.value) def test_read_twoline_wrong_marker(): '''Test failure when position line uses characters prone to ambiguity Characters in the position line must be part of an allowed set because normal letters or numbers will lead to ambiguous tables. ''' table = """ | Col1 | Col2 | |aaaaaa|aaaaaaaaaa| | 1.2 | "hello" | | 2.4 | 's worlds| """ with pytest.raises(InconsistentTableError) as excinfo: dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, delimiter='|', guess=False) assert 'Characters in position line must be part' in str(excinfo.value) def test_write_twoline_normal(): """Write a table as a normal fixed width table.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine) assert_equal_splitlines(out.getvalue(), """\ Col1 Col2 Col3 Col4 ---- --------- ---- ---- 1.2 "hello" 1 a 2.4 's worlds 2 2 """) def test_write_twoline_no_pad(): """Write a table as a fixed width table with no padding.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine, delimiter_pad=' ', position_char='=') assert_equal_splitlines(out.getvalue(), """\ Col1 Col2 Col3 Col4 ==== ========= ==== ==== 1.2 "hello" 1 a 2.4 's worlds 2 2 """) def test_write_twoline_no_bookend(): """Write a table as a fixed width table with no bookend.""" out = StringIO() ascii.write(dat, out, Writer=ascii.FixedWidthTwoLine, bookend=True, delimiter='|') assert_equal_splitlines(out.getvalue(), """\ |Col1| Col2|Col3|Col4| |----|---------|----|----| | 1.2| "hello"| 1| a| | 2.4|'s worlds| 2| 2| """)
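# --- Illustrative sketch (not part of the original test module above).
# Reading a header-less fixed-width table from explicit column start
# positions, the mechanism exercised by test_read_col_starts and
# test_read_detect_col_starts_or_ends. The sample rows are made up.
from astropy.io import ascii


def _col_starts_sketch():
    sample = ('John  555- 1234 192.168.1.10\n'
              'Mary  555- 2134 192.168.1.12\n')
    result = ascii.read(sample, Reader=ascii.FixedWidthNoHeader,
                        names=('Name', 'Phone', 'TCP'),
                        col_starts=(0, 6, 16))
    return result['Name'][1]  # expected: 'Mary'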
2b9d05b459d10b68d4662bc1851c375195c03ca06a87adaf46130feb8f41b1d0
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO import pytest from ..ui import read from ..ipac import Ipac, IpacFormatError, IpacFormatErrorDBMS from ....tests.helper import catch_warnings from ... import ascii from ....table import Table from ..core import masked DATA = ''' | a | b | | char | char | ABBBBBBABBBBBBBA ''' def test_ipac_default(): # default should be ignore table = read(DATA, Reader=Ipac) assert table['a'][0] == 'BBBBBB' assert table['b'][0] == 'BBBBBBB' def test_ipac_ignore(): table = read(DATA, Reader=Ipac, definition='ignore') assert table['a'][0] == 'BBBBBB' assert table['b'][0] == 'BBBBBBB' def test_ipac_left(): table = read(DATA, Reader=Ipac, definition='left') assert table['a'][0] == 'BBBBBBA' assert table['b'][0] == 'BBBBBBBA' def test_ipac_right(): table = read(DATA, Reader=Ipac, definition='right') assert table['a'][0] == 'ABBBBBB' assert table['b'][0] == 'ABBBBBBB' def test_too_long_colname_default(): table = Table([[3]], names=['a1234567890123456789012345678901234567890']) out = StringIO() with pytest.raises(IpacFormatError): ascii.write(table, out, Writer=Ipac) def test_too_long_colname_strict(): table = Table([[3]], names=['a1234567890123456']) out = StringIO() with pytest.raises(IpacFormatErrorDBMS): ascii.write(table, out, Writer=Ipac, DBMS=True) def test_too_long_colname_notstrict(): table = Table([[3]], names=['a1234567890123456789012345678901234567890']) out = StringIO() with pytest.raises(IpacFormatError): ascii.write(table, out, Writer=Ipac, DBMS=False) @pytest.mark.parametrize(("strict_", "Err"), [(True, IpacFormatErrorDBMS), (False, IpacFormatError)]) def test_non_alfnum_colname(strict_, Err): table = Table([[3]], names=['a123456789 01234']) out = StringIO() with pytest.raises(Err): ascii.write(table, out, Writer=Ipac, DBMS=strict_) def test_colname_starswithnumber_strict(): table = Table([[3]], names=['a123456789 01234']) out = StringIO() with pytest.raises(IpacFormatErrorDBMS): ascii.write(table, out, Writer=Ipac, DBMS=True) def test_double_colname_strict(): table = Table([[3], [1]], names=['DEC', 'dec']) out = StringIO() with pytest.raises(IpacFormatErrorDBMS): ascii.write(table, out, Writer=Ipac, DBMS=True) @pytest.mark.parametrize('colname', ['x', 'y', 'z', 'X', 'Y', 'Z']) def test_reserved_colname_strict(colname): table = Table([['reg']], names=[colname]) out = StringIO() with pytest.raises(IpacFormatErrorDBMS): ascii.write(table, out, Writer=Ipac, DBMS=True) def test_too_long_comment(): with catch_warnings(UserWarning) as w: table = Table([[3]]) table.meta['comments'] = ['a' * 79] out = StringIO() ascii.write(table, out, Writer=Ipac) w = w[0] assert 'Comment string > 78 characters was automatically wrapped.' == str(w.message) expected_out = """\ \\ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \\ a |col0| |long| | | |null| 3 """ assert out.getvalue().strip().splitlines() == expected_out.splitlines() def test_out_with_nonstring_null(): '''Test a (non-string) fill value. Even for a table with no masked values, the fill_value should show up in the table header.
''' table = Table([[3]], masked=True) out = StringIO() ascii.write(table, out, Writer=Ipac, fill_values=[(masked, -99999)]) expected_out = """\ | col0| | long| | | |-99999| 3 """ assert out.getvalue().strip().splitlines() == expected_out.splitlines() def test_include_exclude_names(): table = Table([[1], [2], [3]], names=('A', 'B', 'C')) out = StringIO() ascii.write(table, out, Writer=Ipac, include_names=('A', 'B'), exclude_names=('A',)) # column B should be the only included column in output expected_out = """\ | B| |long| | | |null| 2 """ assert out.getvalue().strip().splitlines() == expected_out.splitlines()
e4c25b78fdd3e47f88b9404ce90b82cc09c13b2ee7d7afff55af89a32f4dc99f
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests some of the methods related to the ``HTML`` reader/writer and aims to document its functionality. Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_ to be installed. """ from io import StringIO from .. import html from .. import core from ....table import Table import pytest import numpy as np from .common import setup_function, teardown_function from ... import ascii from ....utils.xml.writer import HAS_BLEACH # Check to see if the BeautifulSoup dependency is present. try: from bs4 import BeautifulSoup, FeatureNotFound HAS_BEAUTIFUL_SOUP = True except ImportError: HAS_BEAUTIFUL_SOUP = False @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_soupstring(): """ Test to make sure the class SoupString behaves properly. """ soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>') soup_str = html.SoupString(soup) assert isinstance(soup_str, str) assert isinstance(soup_str, html.SoupString) assert soup_str == '<html><head></head><body><p>foo</p></body></html>' assert soup_str.soup is soup def test_listwriter(): """ Test to make sure the class ListWriter behaves properly. """ lst = [] writer = html.ListWriter(lst) for i in range(5): writer.write(i) for ch in 'abcde': writer.write(ch) assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e'] @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_identify_table(): """ Test to make sure that identify_table() returns whether the given BeautifulSoup tag is the correct table to process. """ # Should return False on non-<table> tags and None soup = BeautifulSoup('<html><body></body></html>') assert html.identify_table(soup, {}, 0) is False assert html.identify_table(None, {}, 0) is False soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>' '<td>B</td></tr></table>').table assert html.identify_table(soup, {}, 2) is False assert html.identify_table(soup, {}, 1) is True # Default index of 1 # Same tests, but with explicit parameter assert html.identify_table(soup, {'table_id': 2}, 1) is False assert html.identify_table(soup, {'table_id': 1}, 1) is True # Test identification by string ID assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_missing_data(): """ Test reading a table with missing data """ # First with default where blank => '0' table_in = ['<table>', '<tr><th>A</th></tr>', '<tr><td></td></tr>', '<tr><td>1</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html') assert dat.masked is True assert np.all(dat['A'].mask == [True, False]) assert dat['A'].dtype.kind == 'i' # Now with a specific value '...' 
=> missing table_in = ['<table>', '<tr><th>A</th></tr>', '<tr><td>...</td></tr>', '<tr><td>1</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')]) assert dat.masked is True assert np.all(dat['A'].mask == [True, False]) assert dat['A'].dtype.kind == 'i' @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_rename_cols(): """ Test reading a table and renaming cols """ table_in = ['<table>', '<tr><th>A</th> <th>B</th></tr>', '<tr><td>1</td><td>2</td></tr>', '</table>'] # Swap column names dat = Table.read(table_in, format='ascii.html', names=['B', 'A']) assert dat.colnames == ['B', 'A'] assert len(dat) == 1 # Swap column names and only include A (the renamed version) dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A']) assert dat.colnames == ['A'] assert len(dat) == 1 assert np.all(dat['A'] == 2) @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_no_names(): """ Test reading a table witn no column header """ table_in = ['<table>', '<tr><td>1</td></tr>', '<tr><td>2</td></tr>', '</table>'] dat = Table.read(table_in, format='ascii.html') assert dat.colnames == ['col1'] assert len(dat) == 2 dat = Table.read(table_in, format='ascii.html', names=['a']) assert dat.colnames == ['a'] assert len(dat) == 2 @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_identify_table_fail(): """ Raise an exception with an informative error message if table_id is not found. """ table_in = ['<table id="foo"><tr><th>A</th></tr>', '<tr><td>B</td></tr></table>'] with pytest.raises(core.InconsistentTableError) as err: Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'}, guess=False) assert str(err).endswith("ERROR: HTML table id 'bad_id' not found") with pytest.raises(core.InconsistentTableError) as err: Table.read(table_in, format='ascii.html', htmldict={'table_id': 3}, guess=False) assert str(err).endswith("ERROR: HTML table number 3 not found") @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_backend_parsers(): """ Make sure the user can specify which back-end parser to use and that an error is raised if the parser is invalid. """ for parser in ('lxml', 'xml', 'html.parser', 'html5lib'): try: table = Table.read('t/html2.html', format='ascii.html', htmldict={'parser': parser}, guess=False) except FeatureNotFound: if parser == 'html.parser': raise # otherwise ignore if the dependency isn't present # reading should fail if the parser is invalid with pytest.raises(FeatureNotFound): Table.read('t/html2.html', format='ascii.html', htmldict={'parser': 'foo'}, guess=False) @pytest.mark.skipif('HAS_BEAUTIFUL_SOUP') def test_htmlinputter_no_bs4(): """ This should return an OptionalTableImportError if BeautifulSoup is not installed. """ inputter = html.HTMLInputter() with pytest.raises(core.OptionalTableImportError): inputter.process_lines([]) @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_htmlinputter(): """ Test to ensure that HTMLInputter correctly converts input into a list of SoupStrings representing table elements. 
""" f = 't/html.html' with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} # In absence of table_id, defaults to the first table expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>', '<tr><td>1</td><td>a</td><td>1.05</td></tr>', '<tr><td>2</td><td>b</td><td>2.75</td></tr>', '<tr><td>3</td><td>c</td><td>-1.25</td></tr>'] assert [str(x) for x in inputter.get_lines(table)] == expected # Should raise an InconsistentTableError if the table is not found inputter.html = {'table_id': 4} with pytest.raises(core.InconsistentTableError): inputter.get_lines(table) # Identification by string ID inputter.html['table_id'] = 'second' expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>', '<tr><td>4</td><td>d</td><td>10.5</td></tr>', '<tr><td>5</td><td>e</td><td>27.5</td></tr>', '<tr><td>6</td><td>f</td><td>-12.5</td></tr>'] assert [str(x) for x in inputter.get_lines(table)] == expected # Identification by integer index inputter.html['table_id'] = 3 expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>', '<tr><td>7</td><td>g</td><td>105.0</td></tr>', '<tr><td>8</td><td>h</td><td>275.0</td></tr>', '<tr><td>9</td><td>i</td><td>-125.0</td></tr>'] assert [str(x) for x in inputter.get_lines(table)] == expected @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_htmlsplitter(): """ Test to make sure that HTMLSplitter correctly inputs lines of type SoupString to return a generator that gives all header and data elements. """ splitter = html.HTMLSplitter() lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>').tr), html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>').tr)] expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']] assert list(splitter(lines)) == expected_data # Make sure the presence of a non-SoupString triggers a TypeError lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>') with pytest.raises(TypeError): list(splitter(lines)) # Make sure that passing an empty list triggers an error with pytest.raises(core.InconsistentTableError): list(splitter([])) @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_htmlheader_start(): """ Test to ensure that the start_line method of HTMLHeader returns the first line of header data. Uses t/html.html for sample input. """ f = 't/html.html' with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} header = html.HTMLHeader() lines = inputter.get_lines(table) assert str(lines[header.start_line(lines)]) == \ '<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>' inputter.html['table_id'] = 'second' lines = inputter.get_lines(table) assert str(lines[header.start_line(lines)]) == \ '<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>' inputter.html['table_id'] = 3 lines = inputter.get_lines(table) assert str(lines[header.start_line(lines)]) == \ '<tr><th>C1</th><th>C2</th><th>C3</th></tr>' # start_line should return None if no valid header is found lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>').tr), html.SoupString(BeautifulSoup('<p>Text</p>').p)] assert header.start_line(lines) is None # Should raise an error if a non-SoupString is present lines.append('<tr><th>Header</th></tr>') with pytest.raises(TypeError): header.start_line(lines) @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_htmldata(): """ Test to ensure that the start_line and end_lines methods of HTMLData returns the first line of table data. 
Uses t/html.html for sample input. """ f = 't/html.html' with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} data = html.HTMLData() lines = inputter.get_lines(table) assert str(lines[data.start_line(lines)]) == \ '<tr><td>1</td><td>a</td><td>1.05</td></tr>' # end_line returns the index of the last data element + 1 assert str(lines[data.end_line(lines) - 1]) == \ '<tr><td>3</td><td>c</td><td>-1.25</td></tr>' inputter.html['table_id'] = 'second' lines = inputter.get_lines(table) assert str(lines[data.start_line(lines)]) == \ '<tr><td>4</td><td>d</td><td>10.5</td></tr>' assert str(lines[data.end_line(lines) - 1]) == \ '<tr><td>6</td><td>f</td><td>-12.5</td></tr>' inputter.html['table_id'] = 3 lines = inputter.get_lines(table) assert str(lines[data.start_line(lines)]) == \ '<tr><td>7</td><td>g</td><td>105.0</td></tr>' assert str(lines[data.end_line(lines) - 1]) == \ '<tr><td>9</td><td>i</td><td>-125.0</td></tr>' # start_line should raise an error if no table data exists lines = [html.SoupString(BeautifulSoup('<div></div>').div), html.SoupString(BeautifulSoup('<p>Text</p>').p)] with pytest.raises(core.InconsistentTableError): data.start_line(lines) # end_line should return None if no table data exists assert data.end_line(lines) is None # Should raise an error if a non-SoupString is present lines.append('<tr><td>Data</td></tr>') with pytest.raises(TypeError): data.start_line(lines) with pytest.raises(TypeError): data.end_line(lines) def test_multicolumn_write(): """ Test to make sure that the HTML writer writes multidimensional columns (those with iterable elements) using the colspan attribute of <th>. """ col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th colspan="2">C2</th> <th colspan="3">C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0</td> <td>1.0</td> <td>a</td> <td>a</td> <td>a</td> </tr> <tr> <td>2</td> <td>2.0</td> <td>2.0</td> <td>b</td> <td>b</td> <td>b</td> </tr> <tr> <td>3</td> <td>3.0</td> <td>3.0</td> <td>c</td> <td>c</td> <td>c</td> </tr> </table> </body> </html> """ out = html.HTML().write(table)[0].strip() assert out == expected.strip() @pytest.mark.skipif('not HAS_BLEACH') def test_multicolumn_write_escape(): """ Test to make sure that the HTML writer writes multidimensional columns (those with iterable elements) using the colspan attribute of <th>. 
""" col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')] table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th colspan="2">C2</th> <th colspan="3">C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0</td> <td>1.0</td> <td><a></a></td> <td><a></a></td> <td>a</td> </tr> <tr> <td>2</td> <td>2.0</td> <td>2.0</td> <td><b></b></td> <td>b</td> <td>b</td> </tr> <tr> <td>3</td> <td>3.0</td> <td>3.0</td> <td>c</td> <td>c</td> <td>c</td> </tr> </table> </body> </html> """ out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip() assert out == expected.strip() def test_write_no_multicols(): """ Test to make sure that the HTML writer will not use multi-dimensional columns if the multicol parameter is False. """ col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] table = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th>C2</th> <th>C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0 .. 1.0</td> <td>a .. a</td> </tr> <tr> <td>2</td> <td>2.0 .. 2.0</td> <td>b .. b</td> </tr> <tr> <td>3</td> <td>3.0 .. 3.0</td> <td>c .. c</td> </tr> </table> </body> </html> """ assert html.HTML({'multicol': False}).write(table)[0].strip() == \ expected.strip() @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_multicolumn_read(): """ Test to make sure that the HTML reader inputs multidimensional columns (those with iterable elements) using the colspan attribute of <th>. Ensure that any string element within a multidimensional column casts all elements to string prior to type conversion operations. """ table = Table.read('t/html2.html', format='ascii.html') str_type = np.dtype((str, 21)) expected = Table(np.array([(['1', '2.5000000000000000001'], 3), (['1a', '1'], 3.5)], dtype=[('A', str_type, (2,)), ('B', '<f8')])) assert np.all(table == expected) @pytest.mark.skipif('not HAS_BLEACH') def test_raw_html_write(): """ Test that columns can contain raw HTML which is not escaped. """ t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b']) # One column contains raw HTML (string input) out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'}) expected = """\ <tr> <td><em>x</em></td> <td>&lt;em&gt;y&lt;/em&gt;</td> </tr>""" assert expected in out.getvalue() # One column contains raw HTML (list input) out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']}) assert expected in out.getvalue() # Two columns contains raw HTML (list input) out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']}) expected = """\ <tr> <td><em>x</em></td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() @pytest.mark.skipif('not HAS_BLEACH') def test_raw_html_write_clean(): """ Test that columns can contain raw HTML which is not escaped. 
""" import bleach t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c']) # Confirm that <script> and <p> get escaped but not <em> out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames}) expected = """\ <tr> <td>&lt;script&gt;x&lt;/script&gt;</td> <td>&lt;p&gt;y&lt;/p&gt;</td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() # Confirm that we can whitelist <p> out = StringIO() t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames, 'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}}) expected = """\ <tr> <td>&lt;script&gt;x&lt;/script&gt;</td> <td><p>y</p></td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() def test_write_table_html_fill_values(): """ Test that passing fill_values should replace any matching row """ buffer_output = StringIO() t = Table([[1], [2]], names=('a', 'b')) ascii.write(t, buffer_output, fill_values=('1', 'Hello world'), format='html') t_expected = Table([['Hello world'], [2]], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_html_fill_values_optional_columns(): """ Test that passing optional column in fill_values should only replace matching columns """ buffer_output = StringIO() t = Table([[1], [1]], names=('a', 'b')) ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'), format='html') t_expected = Table([[1], ['Hello world']], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_html_fill_values_masked(): """ Test that passing masked values in fill_values should only replace masked columns or values """ buffer_output = StringIO() t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8')) t['a'] = np.ma.masked ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'), format='html') t_expected = Table([['TEST'], [1]], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_multicolumn_table_html_fill_values(): """ Test to make sure that the HTML writer writes multidimensional columns with correctly replaced fill_values. 
""" col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')] buffer_output = StringIO() t = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) ascii.write(t, buffer_output, fill_values=('a', 'z'), format='html') col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')] buffer_expected = StringIO() t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3')) ascii.write(t_expected, buffer_expected, format='html') assert buffer_output.getvalue() == buffer_expected.getvalue() def test_multi_column_write_table_html_fill_values_masked(): """ Test that passing masked values in fill_values should only replace masked columns or values for multidimensional tables """ buffer_output = StringIO() t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True) t['a'][0:2] = np.ma.masked t['b'][0:2] = np.ma.masked ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')], format='html') t_expected = Table([['MASKED', 'MASKED', 3, 4], ['MASKED', 'MASKED', '--', 'b']], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') print(buffer_expected.getvalue()) assert buffer_output.getvalue() == buffer_expected.getvalue() @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_read_html_unicode(): """ Test reading an HTML table with unicode values """ table_in = [u'<table>', u'<tr><td>&#x0394;</td></tr>', u'<tr><td>Δ</td></tr>', u'</table>'] dat = Table.read(table_in, format='ascii.html') assert np.all(dat['col1'] == [u'Δ', u'Δ'])
1c5c4c6c061b09a6bf962ca1e4f2afdb3fefbcf95b5d4202fc2fb9f4b005a022
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os

import numpy as np

__all__ = ['raises', 'assert_equal', 'assert_almost_equal',
           'assert_true', 'setup_function', 'teardown_function',
           'has_isnan']

CWD = os.getcwd()
TEST_DIR = os.path.dirname(__file__)

has_isnan = True
try:
    from math import isnan  # pylint: disable=W0611
except ImportError:
    try:
        from numpy import isnan  # pylint: disable=W0611
    except ImportError:
        has_isnan = False
        print('Tests requiring isnan will fail')


def setup_function(function):
    os.chdir(TEST_DIR)


def teardown_function(function):
    os.chdir(CWD)


# Compatibility functions to convert from nose to py.test
def assert_equal(a, b):
    assert a == b


def assert_almost_equal(a, b, **kwargs):
    assert np.allclose(a, b, **kwargs)


def assert_true(a):
    assert a


def make_decorator(func):
    """
    Wraps a test decorator so as to properly replicate metadata
    of the decorated function, including nose's additional stuff
    (namely, setup and teardown).
    """
    def decorate(newfunc):
        if hasattr(func, 'compat_func_name'):
            name = func.compat_func_name
        else:
            name = func.__name__
        newfunc.__dict__ = func.__dict__
        newfunc.__doc__ = func.__doc__
        newfunc.__module__ = func.__module__
        if not hasattr(newfunc, 'compat_co_firstlineno'):
            try:
                newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
            except AttributeError:
                newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
        try:
            newfunc.__name__ = name
        except TypeError:
            # can't set func name in 2.3
            newfunc.compat_func_name = name
        return newfunc
    return decorate


def raises(*exceptions):
    """Test must raise one of expected exceptions to pass.

    Example use::

      @raises(TypeError, ValueError)
      def test_raises_type_error():
          raise TypeError("This test passes")

      @raises(Exception)
      def test_that_fails_by_passing():
          pass

    If you want to test many assertions about exceptions in a
    single test, you may want to use `assert_raises` instead.
    """
    valid = ' or '.join([e.__name__ for e in exceptions])

    def decorate(func):
        name = func.__name__

        def newfunc(*arg, **kw):
            try:
                func(*arg, **kw)
            except exceptions:
                pass
            else:
                message = "{}() did not raise {}".format(name, valid)
                raise AssertionError(message)
        newfunc = make_decorator(func)(newfunc)
        return newfunc

    return decorate
6c6e1e67ca760e48dd5868e03d8d1548fbac210d54605724b4201d3db41c41e4
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys

import pytest
import numpy as np

from .. import read

ROOT = os.path.abspath(os.path.dirname(__file__))

try:
    import bz2  # pylint: disable=W0611
except ImportError:
    HAS_BZ2 = False
else:
    HAS_BZ2 = True

try:
    import lzma
except ImportError:
    HAS_XZ = False
else:
    HAS_XZ = True


@pytest.mark.parametrize('filename', ['t/daophot.dat.gz', 't/latex1.tex.gz',
                                      't/short.rdb.gz'])
def test_gzip(filename):
    t_comp = read(os.path.join(ROOT, filename))
    t_uncomp = read(os.path.join(ROOT, filename.replace('.gz', '')))
    assert t_comp.dtype.names == t_uncomp.dtype.names
    assert np.all(t_comp.as_array() == t_uncomp.as_array())


@pytest.mark.xfail('not HAS_BZ2')
@pytest.mark.parametrize('filename', ['t/short.rdb.bz2', 't/ipac.dat.bz2'])
def test_bzip2(filename):
    t_comp = read(os.path.join(ROOT, filename))
    t_uncomp = read(os.path.join(ROOT, filename.replace('.bz2', '')))
    assert t_comp.dtype.names == t_uncomp.dtype.names
    assert np.all(t_comp.as_array() == t_uncomp.as_array())


@pytest.mark.xfail('not HAS_XZ')
@pytest.mark.parametrize('filename', ['t/short.rdb.xz', 't/ipac.dat.xz'])
def test_xz(filename):
    t_comp = read(os.path.join(ROOT, filename))
    t_uncomp = read(os.path.join(ROOT, filename.replace('.xz', '')))
    assert t_comp.dtype.names == t_uncomp.dtype.names
    assert np.all(t_comp.as_array() == t_uncomp.as_array())
f7d9e98c4b7e9a6f033319d64a40160fa9bf838935681a986bfcb554c0538fb4
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests some of the methods related to the ``ECSV`` reader/writer. Requires `pyyaml <http://pyyaml.org/>`_ to be installed. """ import os import copy import sys from io import StringIO import pytest import numpy as np from ....table import Table, Column, QTable, NdarrayMixin from ....table.table_helpers import simple_table from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation from ....time import Time, TimeDelta from ....tests.helper import quantity_allclose from ....units.quantity import QuantityInfo from ..ecsv import DELIMITERS from ... import ascii from .... import units as u try: import yaml # pylint: disable=W0611 HAS_YAML = True except ImportError: HAS_YAML = False DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64', 'float16', 'float32', 'float64', 'float128', 'str'] if os.name == 'nt' or sys.maxsize <= 2**32: DTYPES.remove('float128') T_DTYPES = Table() for dtype in DTYPES: if dtype == 'bool': data = np.array([False, True, False]) elif dtype == 'str': data = np.array(['ab 0', 'ab, 1', 'ab2']) else: data = np.arange(3, dtype=dtype) c = Column(data, unit='m / s', description='descr_' + dtype, meta={'meta ' + dtype: 1}) T_DTYPES[dtype] = c T_DTYPES.meta['comments'] = ['comment1', 'comment2'] # Corresponds to simple_table() SIMPLE_LINES = ['# %ECSV 0.9', '# ---', '# datatype:', '# - {name: a, datatype: int64}', '# - {name: b, datatype: float64}', '# - {name: c, datatype: string}', '# schema: astropy-2.0', 'a b c', '1 1.0 c', '2 2.0 d', '3 3.0 e'] @pytest.mark.skipif('not HAS_YAML') def test_write_simple(): """ Write a simple table with common types. This shows the compact version of serialization with one line per column. """ t = simple_table() out = StringIO() t.write(out, format='ascii.ecsv') assert out.getvalue().splitlines() == SIMPLE_LINES @pytest.mark.skipif('not HAS_YAML') def test_write_full(): """ Write a full-featured table with common types and explicitly checkout output """ t = T_DTYPES['bool', 'int64', 'float64', 'str'] lines = ['# %ECSV 0.9', '# ---', '# datatype:', '# - name: bool', '# unit: m / s', '# datatype: bool', '# description: descr_bool', '# meta: {meta bool: 1}', '# - name: int64', '# unit: m / s', '# datatype: int64', '# description: descr_int64', '# meta: {meta int64: 1}', '# - name: float64', '# unit: m / s', '# datatype: float64', '# description: descr_float64', '# meta: {meta float64: 1}', '# - name: str', '# unit: m / s', '# datatype: string', '# description: descr_str', '# meta: {meta str: 1}', '# meta: !!omap', '# - comments: [comment1, comment2]', '# schema: astropy-2.0', 'bool int64 float64 str', 'False 0 0.0 "ab 0"', 'True 1 1.0 "ab, 1"', 'False 2 2.0 ab2'] out = StringIO() t.write(out, format='ascii.ecsv') assert out.getvalue().splitlines() == lines @pytest.mark.skipif('not HAS_YAML') def test_write_read_roundtrip(): """ Write a full-featured table with all types and see that it round-trips on readback. Use both space and comma delimiters. 
""" t = T_DTYPES for delimiter in DELIMITERS: out = StringIO() t.write(out, format='ascii.ecsv', delimiter=delimiter) t2s = [Table.read(out.getvalue(), format='ascii.ecsv'), Table.read(out.getvalue(), format='ascii'), ascii.read(out.getvalue()), ascii.read(out.getvalue(), format='ecsv', guess=False), ascii.read(out.getvalue(), format='ecsv')] for t2 in t2s: assert t.meta == t2.meta for name in t.colnames: assert t[name].attrs_equal(t2[name]) assert np.all(t[name] == t2[name]) @pytest.mark.skipif('not HAS_YAML') def test_bad_delimiter(): """ Passing a delimiter other than space or comma gives an exception """ out = StringIO() with pytest.raises(ValueError) as err: T_DTYPES.write(out, format='ascii.ecsv', delimiter='|') assert 'only space and comma are allowed' in str(err.value) @pytest.mark.skipif('not HAS_YAML') def test_bad_header_start(): """ Bad header without initial # %ECSV x.x """ lines = copy.copy(SIMPLE_LINES) lines[0] = '# %ECV 0.9' with pytest.raises(ascii.InconsistentTableError): Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) @pytest.mark.skipif('not HAS_YAML') def test_bad_delimiter_input(): """ Illegal delimiter in input """ lines = copy.copy(SIMPLE_LINES) lines.insert(2, '# delimiter: |') with pytest.raises(ValueError) as err: Table.read('\n'.join(lines), format='ascii.ecsv', guess=False) assert 'only space and comma are allowed' in str(err.value) @pytest.mark.skipif('not HAS_YAML') def test_multidim_input(): """ Multi-dimensional column in input """ t = Table([np.arange(4).reshape(2, 2)], names=['a']) out = StringIO() with pytest.raises(ValueError) as err: t.write(out, format='ascii.ecsv') assert 'ECSV format does not support multidimensional column' in str(err.value) @pytest.mark.skipif('not HAS_YAML') def test_round_trip_empty_table(): """Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)""" t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c']) out = StringIO() t.write(out, format='ascii.ecsv') t2 = Table.read(out.getvalue(), format='ascii.ecsv') assert t.dtype == t2.dtype assert len(t2) == 0 @pytest.mark.skipif('not HAS_YAML') def test_csv_ecsv_colnames_mismatch(): """ Test that mismatch in column names from normal CSV header vs. ECSV YAML header raises the expected exception. """ lines = copy.copy(SIMPLE_LINES) header_index = lines.index('a b c') lines[header_index] = 'a b d' with pytest.raises(ValueError) as err: ascii.read(lines, format='ecsv') assert "column names from ECSV header ['a', 'b', 'c']" in str(err) @pytest.mark.skipif('not HAS_YAML') def test_regression_5604(): """ See https://github.com/astropy/astropy/issues/5604 for more. 
""" t = Table() t.meta = {"foo": 5*u.km, "foo2": u.s} t["bar"] = [7]*u.km out = StringIO() t.write(out, format="ascii.ecsv") assert '!astropy.units.Unit' in out.getvalue() assert '!astropy.units.Quantity' in out.getvalue() def assert_objects_equal(obj1, obj2, attrs, compare_class=True): if compare_class: assert obj1.__class__ is obj2.__class__ info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description'] for attr in attrs + info_attrs: a1 = obj1 a2 = obj2 for subattr in attr.split('.'): try: a1 = getattr(a1, subattr) a2 = getattr(a2, subattr) except AttributeError: a1 = a1[subattr] a2 = a2[subattr] if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f': assert quantity_allclose(a1, a2, rtol=1e-10) else: assert np.all(a1 == a2) el = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5') scc = sc.copy() scc.representation = 'cartesian' tm = Time([51000.5, 51001.5], format='mjd', scale='tai', precision=5, location=el[0]) tm2 = Time(tm, format='iso') tm3 = Time(tm, location=el) tm3.info.serialize_method['ecsv'] = 'jd1_jd2' mixin_cols = { 'tm': tm, 'tm2': tm2, 'tm3': tm3, 'dt': TimeDelta([1, 2] * u.day), 'sc': sc, 'scc': scc, 'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4', obstime=['J1990.5'] * 2), 'q': [1, 2] * u.m, 'lat': Latitude([1, 2] * u.deg), 'lon': Longitude([1, 2] * u.deg, wrap_angle=180.*u.deg), 'ang': Angle([1, 2] * u.deg), 'el': el, # 'nd': NdarrayMixin(el) # not supported yet } time_attrs = ['value', 'shape', 'format', 'scale', 'precision', 'in_subfmt', 'out_subfmt', 'location'] compare_attrs = { 'c1': ['data'], 'c2': ['data'], 'tm': time_attrs, 'tm2': time_attrs, 'tm3': time_attrs, 'dt': ['shape', 'value', 'format', 'scale'], 'sc': ['ra', 'dec', 'representation', 'frame.name'], 'scc': ['x', 'y', 'z', 'representation', 'frame.name'], 'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'], 'q': ['value', 'unit'], 'lon': ['value', 'unit', 'wrap_angle'], 'lat': ['value', 'unit'], 'ang': ['value', 'unit'], 'el': ['x', 'y', 'z', 'ellipsoid'], 'nd': ['x', 'y', 'z'], } @pytest.mark.skipif('not HAS_YAML') def test_ecsv_mixins_ascii_read_class(): """Ensure that ascii.read(ecsv_file) returns the correct class (QTable if any Quantity subclasses, Table otherwise). """ # Make a table with every mixin type except Quantities t = QTable({name: col for name, col in mixin_cols.items() if not isinstance(col.info, QuantityInfo)}) out = StringIO() t.write(out, format="ascii.ecsv") t2 = ascii.read(out.getvalue(), format='ecsv') assert type(t2) is Table # Add a single quantity column t['lon'] = mixin_cols['lon'] out = StringIO() t.write(out, format="ascii.ecsv") t2 = ascii.read(out.getvalue(), format='ecsv') assert type(t2) is QTable @pytest.mark.skipif('not HAS_YAML') def test_ecsv_mixins_qtable_to_table(): """Test writing as QTable and reading as Table. Ensure correct classes come out. """ names = sorted(mixin_cols) t = QTable([mixin_cols[name] for name in names], names=names) out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format='ascii.ecsv') assert t.colnames == t2.colnames for name, col in t.columns.items(): col2 = t2[name] attrs = compare_attrs[name] compare_class = True if isinstance(col.info, QuantityInfo): # Downgrade Quantity to Column + unit assert type(col2) is Column # Class-specific attributes like `value` or `wrap_angle` are lost. 
attrs = ['unit'] compare_class = False # Compare data values here (assert_objects_equal doesn't know how in this case) assert np.allclose(col.value, col2, rtol=1e-10) assert_objects_equal(col, col2, attrs, compare_class) @pytest.mark.skipif('not HAS_YAML') @pytest.mark.parametrize('table_cls', (Table, QTable)) def test_ecsv_mixins_as_one(table_cls): """Test write/read all cols at once and validate intermediate column names""" names = sorted(mixin_cols) serialized_names = ['ang', 'dt', 'el.x', 'el.y', 'el.z', 'lat', 'lon', 'q', 'sc.ra', 'sc.dec', 'scc.x', 'scc.y', 'scc.z', 'scd.ra', 'scd.dec', 'scd.distance', 'scd.obstime', 'tm', # serialize_method is formatted_value 'tm2', # serialize_method is formatted_value 'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2 'tm3.location.x', 'tm3.location.y', 'tm3.location.z'] t = table_cls([mixin_cols[name] for name in names], names=names) out = StringIO() t.write(out, format="ascii.ecsv") t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') assert t.colnames == t2.colnames # Read as a ascii.basic table (skip all the ECSV junk) t3 = table_cls.read(out.getvalue(), format='ascii.basic') assert t3.colnames == serialized_names @pytest.mark.skipif('not HAS_YAML') @pytest.mark.parametrize('name_col', list(mixin_cols.items())) @pytest.mark.parametrize('table_cls', (Table, QTable)) def test_ecsv_mixins_per_column(table_cls, name_col): """Test write/read one col at a time and do detailed validation""" name, col = name_col c = [1.0, 2.0] t = table_cls([c, col, c], names=['c1', name, 'c2']) t[name].info.description = 'description' if not t.has_mixin_columns: pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)') if isinstance(t[name], NdarrayMixin): pytest.xfail('NdarrayMixin not supported') out = StringIO() t.write(out, format="ascii.ecsv") t2 = table_cls.read(out.getvalue(), format='ascii.ecsv') assert t.colnames == t2.colnames for colname in t.colnames: assert_objects_equal(t[colname], t2[colname], compare_attrs[colname]) # Special case to make sure Column type doesn't leak into Time class data if name.startswith('tm'): assert t2[name]._time.jd1.__class__ is np.ndarray assert t2[name]._time.jd2.__class__ is np.ndarray
48fe6f2333df076a5e934723c879c27da9fb8c2bb2e330fb6aa034f8c7c7e4ac
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst import re from io import BytesIO, open from collections import OrderedDict import locale import platform from io import StringIO import pathlib import pytest import numpy as np from ... import ascii from ....table import Table from .... import table from ....units import Unit from ....table.table_helpers import simple_table from .common import (raises, assert_equal, assert_almost_equal, assert_true) from .. import core from ..ui import _probably_html, get_read_trace, cparser # setup/teardown function to have the tests run in the correct directory from .common import setup_function, teardown_function try: import bz2 # pylint: disable=W0611 except ImportError: HAS_BZ2 = False else: HAS_BZ2 = True @pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False}, {'use_fast_converter': True}, 'force']) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = 'U' dat = ascii.read(['a', '1' * 10000], format='basic', fast_reader=fast_reader, guess=False) assert dat['a'].dtype.kind == expected_kind def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. dat = ascii.read(['1,2', '3,4'], names=('a', 'b')) assert len(dat) == 2 assert dat.colnames == ['a', 'b'] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c,d', '3,4'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(['c d', 'e f'], names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(['1,2', '3,4'], format='basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic') assert len(dat) == 1 assert dat.colnames == ['a', 'b'] dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] # For good measure check the same in the unified I/O interface dat = Table.read(['1,2', '3,4'], format='ascii.basic') assert len(dat) == 1 assert dat.colnames == ['1', '2'] dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b')) assert len(dat) == 1 assert dat.colnames == ['a', 'b'] def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc')) assert len(dat) == 2 Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header', names=['a', 'b', 'c']) assert len(dat) == 2 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. 
""" with pytest.raises(ValueError): ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts and 'fast_{0}'.format(test_opts['Reader']._format_name) \ in core.FAST_CLASSES: # has fast version if 'Inputter' not in test_opts: # fast reader doesn't allow this test_opts['fast_reader'] = fast_reader table = ascii.read(testfile['name'], **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_all_files_via_table(fast_reader): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue print('\n\n******** READING {}'.format(testfile['name'])) for guess in (True, False): test_opts = testfile['opts'].copy() if 'guess' not in test_opts: test_opts['guess'] = guess if 'Reader' in test_opts: format = 'ascii.{0}'.format(test_opts['Reader']._format_name) del test_opts['Reader'] else: format = 'ascii' if 'fast_{0}'.format(format) in core.FAST_CLASSES: test_opts['fast_reader'] = fast_reader table = Table.read(testfile['name'], format=format, **test_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get('skip'): print('\n\n******** SKIPPING {}'.format(testfile['name'])) continue if not testfile['opts'].get('guess', True): continue print('\n\n******** READING {}'.format(testfile['name'])) for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []): # Copy read options except for those in filter_read_opts guess_opts = dict((k, v) for k, v in testfile['opts'].items() if k not in filter_read_opts) table = ascii.read(testfile['name'], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile['cols']) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile['nrows']) def test_daophot_indef(): """Test that INDEF is correctly interpreted as a missing value""" table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot) for colname in table.colnames: # Three columns have all INDEF values and are masked mask_value = colname in ('OTIME', 'MAG', 'MERR', 'XAIRMASS') assert np.all(table[colname].mask == mask_value) def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. 
""" table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot) assert table['LID'].dtype.char in 'fd' # float or double assert table['MAG'].dtype.char in 'fd' # even without any data values assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int) assert table['ID'].dtype.char in 'il' # int or long def test_daophot_header_keywords(): table = ascii.read('t/daophot.dat', Reader=ascii.Daophot) expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'), ('REJFILE', '"hello world"', 'filename', '%-23s'), ('SCALE', '1.', 'units/pix', '%-23.7g'),) keywords = table.meta['keywords'] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword['value'], value) assert_equal(keyword['units'], units) assert_equal(keyword['format'], format_) def test_daophot_multiple_aperture(): table = ascii.read('t/daophot3.dat', Reader=ascii.Daophot) assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file assert table['MERR2'][0] == 1.171 assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read('t/daophot4.dat', Reader=ascii.Daophot) assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file assert table['MERR2'][0] == 0.049 assert np.all(table['RAPERT5'] == 5.) # assert all the 5th apertures are same 5.0 @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('t/no_data_without_header.dat', Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('t/simple.txt', guess=False, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('t/bad.txt', fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read('t/simple5.txt', delimiter='|', fast_reader=fast_reader) @raises(OSError) def test_missing_file(): ascii.read('does_not_exist') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') data = ascii.read('t/simple3.txt', names=names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_include_names(fast_reader): names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6') include_names = ('c1', 'c3') data = ascii.read('t/simple3.txt', names=names, include_names=include_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_set_exclude_names(fast_reader): exclude_names = ('Y', 'object') data = ascii.read('t/simple3.txt', exclude_names=exclude_names, delimiter='|', fast_reader=fast_reader) assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad')) def test_include_names_daophot(): include_names = ('ID', 'MAG', 'PIER') data = 
ascii.read('t/daophot.dat', include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR') data = ascii.read('t/daophot.dat', exclude_names=exclude_names) assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER')) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter='|') reader.inputter.process_lines = process_lines data = reader.read('t/bars_at_ends.txt') assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad')) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r'^\|\s*', '', line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter='|') reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read('t/nls1_stackinfo.dbout') cols = get_testfiles('t/nls1_stackinfo.dbout')['cols'] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = 't/test4.dat' data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091) assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704) assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148) assert_equal(data.field('statname')[2], 'chi2modvar') assert_almost_equal(data.field('statval')[2], 497.56468441) def test_start_end(): data = ascii.read('t/test5.dat', header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field('statname')[0], 'chi2xspecvar') assert_equal(data.field('statname')[-1], 'chi2gehrels') def test_set_converters(): converters = {'zabs1.nh': [ascii.convert_numpy('int32'), ascii.convert_numpy('float32')], 'p1.gamma': [ascii.convert_numpy('str')] } data = ascii.read('t/test4.dat', converters=converters) assert_equal(str(data['zabs1.nh'].dtype), 'float32') assert_equal(data['p1.gamma'][0], '1.26764500000') @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_string(fast_reader): f = 't/simple.txt' with open(f) as fd: table = fd.read() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_filelike(fast_reader): f = 't/simple.txt' testfile = get_testfiles(f) with open(f, 'rb') as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_from_lines(fast_reader): f = 't/simple.txt' with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f) data = ascii.read(table, fast_reader=fast_reader, **testfile['opts']) assert_equal(data.dtype.names, testfile['cols']) assert_equal(len(data), testfile['nrows']) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read('t/apostrophe.rdb') 
assert_equal(table.comment_lines, ['# first comment', ' # second comment']) assert_equal(data.meta['comments'], ['first comment', 'second comment']) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values(fast_reader): f = 't/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, **testfile['opts']) assert_true((data['a'].mask == [False, True]).all()) assert_true((data['a'] == [1, 1]).all()) assert_true((data['b'].mask == [False, True]).all()) assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_col(fast_reader): f = 't/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader, **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_include_names(fast_reader): f = 't/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_include_names=['b'], **testfile['opts']) check_fill_values(data) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_exclude_names(fast_reader): f = 't/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader, fill_exclude_names=['a'], **testfile['opts']) check_fill_values(data) def check_fill_values(data): """compare array column by column with expectation """ assert_true((data['a'].mask == [False, False]).all()) assert_true((data['a'] == ['1', 'a']).all()) assert_true((data['b'].mask == [False, True]).all()) # Check that masked value is "do not care" in comparison assert_true((data['b'] == [2, -999]).all()) data['b'].mask = False # explicitly unmask for comparison assert_true((data['b'] == [2, 1]).all()) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_fill_values_list(fast_reader): f = 't/fill_values.txt' testfile = get_testfiles(f) data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')], fast_reader=fast_reader, **testfile['opts']) data['a'].mask = False # explicitly unmask for comparison assert_true((data['a'] == [42, 42]).all()) def test_masking_Cds(): f = 't/cds.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert_true(data['AK'].mask[0]) assert_true(not data['Fit'].mask[0]) def test_null_Ipac(): f = 't/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) mask = np.array([(True, False, True, False, True), (False, False, False, False, False)], dtype=[(str('ra'), '|b1'), (str('dec'), '|b1'), (str('sai'), '|b1'), (str('v2'), '|b1'), (str('sptype'), '|b1')]) assert np.all(data.mask == mask) def test_Ipac_meta(): keywords = OrderedDict((('intval', 1), ('floatval', 2.3e3), ('date', "Wed Sp 20 09:48:36 1995"), ('key_continue', 'IPAC keywords can continue across lines'))) comments = ['This is an example of a valid comment'] f = 't/ipac.dat' testfile = get_testfiles(f) data = ascii.read(f, **testfile['opts']) assert data.meta['keywords'].keys() == keywords.keys() for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()): assert data_kv['value'] == kv assert data.meta['comments'] == comments def test_set_guess_kwarg(): """Read a file using guess with one of the typical guess_kwargs explicitly set.""" data = ascii.read('t/space_delim_no_header.dat', delimiter=',', guess=True) assert(data.dtype.names == ('1 3.4 hello',)) 
assert(len(data) == 1) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_read_rdb_wrong_type(fast_reader): """Read RDB data with inconstent data type (except failure)""" table = """col1\tcol2 N\tN 1\tHello""" with pytest.raises(ValueError): ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader) @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_default_missing(fast_reader): """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,,', '2, , 4.0 , ss ']) dat = ascii.read(table, fast_reader=fast_reader) assert dat.masked is True assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] # Single row table with a single missing element table = """ a \n "" """ dat = ascii.read(table, fast_reader=fast_reader) assert dat.pformat() == [' a ', '---', ' --'] assert dat['a'].dtype.kind == 'i' # Same test with a fixed width reader table = '\n'.join([' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss']) dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) assert dat.masked is True assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 -- --', ' 2 -- 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[]) assert dat.masked is False assert dat.pformat() == [' a b c d ', '--- --- --- ---', ' 1 3 ', ' 2 4.0 ss'] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 't/apostrophe.rdb', 'nrows': 2, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 't/apostrophe.tab', 'nrows': 2, 'opts': {'Reader': ascii.Tab}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 't/cds.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds}}, # Test malformed CDS file (issues #2241 #467) {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 't/cds_malformed.dat', 'nrows': 1, 'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}}, {'cols': ('a', 'b', 'c'), 'name': 't/commented_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader}}, {'cols': ('a', 'b', 'c'), 'name': 't/commented_header2.dat', 'nrows': 2, 'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5'), 'name': 't/continuation.dat', 'nrows': 2, 'opts': {'Inputter': ascii.ContinuationLinesInputter, 'Reader': ascii.NoHeader}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 't/daophot.dat', 'nrows': 2, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALU-ES', 'VALU-ES_1', 'FLAG'), 'name': 't/sextractor.dat', 'nrows': 3, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 't/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('col0', 'objID', 'osrcid', 'xsrcid', 'SpecObjID', 'ra', 'dec', 'obsid', 'ccdid', 'z', 'modelMag_i', 'modelMagErr_i', 'modelMag_r', 'modelMagErr_r', 'expo', 'theta', 'rad_ecf_39', 'detlim90', 'fBlim90'), 'name': 't/nls1_stackinfo.dbout', 'nrows': 58, 
'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}}, {'cols': ('Index', 'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs', 'Match', 'Class', 'AK', 'Fit'), 'name': 't/no_data_cds.dat', 'nrows': 0, 'opts': {'Reader': ascii.Cds}}, {'cols': ('ID', 'XCENTER', 'YCENTER', 'MAG', 'MERR', 'MSKY', 'NITER', 'SHARPNESS', 'CHI', 'PIER', 'PERROR'), 'name': 't/no_data_daophot.dat', 'nrows': 0, 'opts': {'Reader': ascii.Daophot}}, {'cols': ('NUMBER', 'FLUX_ISO', 'FLUXERR_ISO', 'VALUES', 'VALUES_1', 'FLAG'), 'name': 't/no_data_sextractor.dat', 'nrows': 0, 'opts': {'Reader': ascii.SExtractor}}, {'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'), 'name': 't/no_data_ipac.dat', 'nrows': 0, 'opts': {'Reader': ascii.Ipac}}, {'cols': ('ra', 'v2'), 'name': 't/ipac.dat', 'nrows': 2, 'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}}, {'cols': ('a', 'b', 'c'), 'name': 't/no_data_with_header.dat', 'nrows': 0, 'opts': {}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 't/short.rdb', 'nrows': 7, 'opts': {'Reader': ascii.Rdb}}, {'cols': ('agasc_id', 'n_noids', 'n_obs'), 'name': 't/short.tab', 'nrows': 7, 'opts': {'Reader': ascii.Tab}}, {'cols': ('test 1a', 'test2', 'test3', 'test4'), 'name': 't/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'"}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 't/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 't/simple.txt', 'nrows': 1, 'opts': {'quotechar': "'", 'header_start': 1}}, {'cols': ('top1', 'top2', 'top3', 'top4'), 'name': 't/simple.txt', 'nrows': 2, 'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 't/simple2.txt', 'nrows': 3, 'opts': {'delimiter': '|'}}, {'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'), 'name': 't/simple3.txt', 'nrows': 2, 'opts': {'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'), 'name': 't/simple4.txt', 'nrows': 3, 'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}}, {'cols': ('col1', 'col2', 'col3'), 'name': 't/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader}}, {'cols': ('col1', 'col2', 'col3'), 'name': 't/space_delim_no_header.dat', 'nrows': 2, 'opts': {'Reader': ascii.NoHeader, 'header_start': None}}, {'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'), 'name': 't/space_delim_blank_lines.txt', 'nrows': 3, 'opts': {}}, {'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'), 'name': 't/test4.dat', 'nrows': 9, 'opts': {}}, {'cols': ('a', 'b', 'c'), 'name': 't/fill_values.txt', 'nrows': 2, 'opts': {'delimiter': ','}}, {'name': 't/whitespace.dat', 'cols': ('quoted colname with tab\tinside', 'col2', 'col3'), 'nrows': 2, 'opts': {'delimiter': r'\s'}}, {'name': 't/simple_csv.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'opts': {'Reader': ascii.Csv}}, {'name': 't/simple_csv_missing.csv', 'cols': ('a', 'b', 'c'), 'nrows': 2, 'skip': True, 'opts': {'Reader': ascii.Csv}}, {'cols': ('cola', 'colb', 'colc'), 'name': 't/latex1.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Facility', 'Id', 'exposure', 'date'), 'name': 't/latex2.tex', 'nrows': 3, 'opts': {'Reader': ascii.AASTex}}, {'cols': ('cola', 'colb', 'colc'), 'name': 't/latex3.tex', 'nrows': 2, 'opts': {'Reader': ascii.Latex}}, {'cols': ('Col1', 'Col2', 'Col3', 'Col4'), 'name': 't/fixed_width_2_line.txt', 'nrows': 2, 'opts': {'Reader': ascii.FixedWidthTwoLine}}, ] try: import bs4 # pylint: disable=W0611 
testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'), 'name': 't/html.html', 'nrows': 3, 'opts': {'Reader': ascii.HTML}}) except ImportError: pass if name is not None: return [x for x in testfiles if x['name'] == name][0] else: return testfiles def test_header_start_exception(): '''Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. ''' for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]: with pytest.raises(ValueError): reader = ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ['# a, b', '1, 2', '3, 4'] t = ascii.read(lines) assert t.colnames == ['a', 'b'] @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader) assert t.colnames == ['b', 'a'] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. """ table = ascii.read('t/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read('t/sextractor3.dat', Reader=ascii.SExtractor, guess=False) expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'MAG_AUTO', 'MAGERR_AUTO', 'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6', 'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6'] expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')] expected_descrs = ['Object position along x', None, 'Right ascension of barycenter (J2000)', 'Declination of barycenter (J2000)', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', ] + [ 'Fixed aperture magnitude vector'] * 7 + [ 'RMS error vector for fixed aperture mag.'] * 7 for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. 
""" t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ['abc'] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv') assert t.colnames == ['#a', 'b'] assert len(t) == 2 assert t['#a'][1] == '#3' def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. """ t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3']) assert t.colnames == ['a', 'b', 'c'] assert t.meta['comments'] == ['comment1', 'comment2'] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read('asfdasdf\n1 2 3', format='basic') assert "** To figure out why the table did not read, use guess=False and" in str(err.value) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='ipac') assert 'At least one header line beginning and ending with delimiter required' in str(err.value) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False) assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value) @pytest.mark.xfail('not HAS_BZ2') def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ t = ascii.read(open('t/ipac.dat.bz2', 'rb')) assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype'] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = '\n'.join(['a,b,c,d', '1,3,1.11,1', '2, 2, 4.0 , ss ']) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|', '| r | rE | rea | real | D | do | dou | f | i | l | da| c |', ' 1 2 3 4 5 6 7 8 9 10 11 12 '] dat = ascii.read(lines, format='ipac') for name in dat.columns[0:8]: assert dat[name].dtype.kind == 'f' for name in dat.columns[8:10]: assert dat[name].dtype.kind == 'i' for name in dat.columns[10:12]: assert dat[name].dtype.kind in ('U', 'S') def test_almost_but_not_quite_daophot(): '''Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. ''' lines = ["# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9"] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize('fast', [True, False]) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected and that the table round-trips. 
""" lines = ['# a b', '# comment 1', '# comment 2', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert dat.meta['comments'] == ['comment 1', 'comment 2'] out = StringIO() ascii.write(dat, out, format='commented_header', fast_writer=fast) assert out.getvalue().splitlines() == lines lines = ['# a b', '1 2', '3 4'] dat = ascii.read(lines, format='commented_header', fast_reader=fast) assert 'comments' not in dat.meta def test_probably_html(): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for table in ('t/html.html', 'http://blah.com/table.html', 'https://blah.com/table.html', 'file://blah/table.htm', 'ftp://blah.com/table.html', 'file://blah.com/table.htm', ' <! doctype html > hello world', 'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype html > ', ' hello world'), ): assert _probably_html(table) is True for table in ('t/html.htms', 'Xhttp://blah.com/table.html', ' https://blah.com/table.htm', 'fole://blah/table.htm', ' < doctype html > hello world', 'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk', ['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'], (' <! doctype htm > ', ' hello world'), [[1, 2, 3]], ): assert _probably_html(table) is False @pytest.mark.parametrize('fast_reader', [True, False, 'force']) def test_data_header_start(fast_reader): tests = [(['# comment', '', ' ', 'skip this line', # line 0 'a b', # line 1 '1 2'], # line 2 [{'header_start': 1}, {'header_start': 1, 'data_start': 2} ] ), (['# comment', '', ' \t', 'skip this line', # line 0 'a b', # line 1 '', ' \t', 'skip this line', # line 2 '1 2'], # line 3 [{'header_start': 1, 'data_start': 3}]), (['# comment', '', ' ', 'a b', # line 0 '', ' ', 'skip this line', # line 1 '1 2'], # line 2 [{'header_start': 0, 'data_start': 2}, {'data_start': 2}])] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read(lines, format='basic', fast_reader=fast_reader, guess=True, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 1 assert np.all(t['a'] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]['kwargs']['Reader'] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format='basic') assert 'No header line found' in str(err.value) table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=True, format='fast_basic') assert 'Inconsistent data column lengths' in str(err.value) # Put a single line of column names but with no newline for kwargs in [dict(), dict(guess=False, fast_reader=False, format='basic'), dict(guess=False, fast_reader=True, format='fast_basic')]: table = BytesIO() table.write(b'a b') t = ascii.read(table, **kwargs) assert t.colnames == ['a', 'b'] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path('t/simple.txt') data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4'] assert data['test2'][1] == 'hat2' def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format='ipac') assert 'Column col0 failed to convert:' in str(err.value) with pytest.raises(ValueError) as err: ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []}) assert 'no converters' in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == 'Darwin': locale.setlocale(locale.LC_ALL, str('de_DE')) else: locale.setlocale(locale.LC_ALL, str('de_DE.utf8')) for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}): t = ascii.read(['a b', '1.5 2'], format='basic', guess=False, fast_reader=fast_reader) assert t['a'].dtype.kind == 'f' except locale.Error as e: pytest.skip('Locale error: {}'.format(e)) finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): '''Test that a char column of a Table is assigned no unit and not a dimensionless unit.''' t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {'--': '0'} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert reader.read("""# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """)['a'][0] is np.ma.masked def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format='latex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format='aastex') assert dat.colnames == ['a', 'b', 'c'] assert np.all(dat['a'] == ['1', r'3\%']) assert np.all(dat['c'] == ['c', 'e']) @pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252']) def test_read_with_encoding(tmpdir, encoding): data = { 'commented_header': u'# à b è \n 1 2 héllo', 'csv': u'à,b,è\n1,2,héllo' } testfile = str(tmpdir.join('test.txt')) for fmt, content in data.items(): with open(testfile, 'w', encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] for guess in (True, False): table = ascii.read(testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess) assert table['è'].dtype.kind == 'U' assert table.pformat() == [' à b è ', '--- --- -----', ' 1 2 héllo'] def test_unsupported_read_with_encoding(tmpdir): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read('t/simple3.txt', guess=False, fast_reader='force', encoding='latin1', format='fast_csv') def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = 't/test5.dat' t1 = ascii.read(fpath, header_start=1, data_start=3, ) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): t_gen = ascii.read(fp, header_start=1, data_start=3, guess=False, format='fast_basic', fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()): # Now read the full table in chunks t3 = ascii.read(fp, header_start=1, data_start=3, fast_reader={'chunk_size': 300}) assert np.all(t1 == t3) @pytest.mark.parametrize('masked', [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, 'col{}'.format(i + 1)) # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic': out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400, 'chunk_generator': True}) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = 't/test5.dat' with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={'chunk_size': 10}) assert 'no newline found in chunk (chunk_size too small?)' in str(err) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50 table = '\n'.join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={'chunk_size': 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv') assert np.all(table['col1'] == ['\u2119', '1']) assert np.all(table['col2'] == ['\u01b4', '2'])
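# The chunked-reading tests above exercise the fast reader's chunk_size /
# chunk_generator options.  Below is a minimal sketch (not part of the test
# suite) of how those options might be used on a large file; the file name
# 'big_table.dat' and the chunk size are placeholder assumptions.
def _example_read_in_chunks(path='big_table.dat'):
    from astropy.io import ascii
    from astropy import table

    # Ask the fast reader to yield the table piece by piece ...
    chunks = ascii.read(path, format='fast_basic', guess=False,
                        fast_reader={'chunk_size': 1000000,
                                     'chunk_generator': True})
    # ... and stack the pieces back into a single Table.
    return table.vstack(list(chunks))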
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import copy from io import StringIO from itertools import chain import pytest import numpy as np from ... import ascii from .... import table from ....table.table_helpers import simple_table from ....tests.helper import catch_warnings from ....utils.exceptions import AstropyWarning, AstropyDeprecationWarning from .... import units from .common import setup_function, teardown_function # Check to see if the BeautifulSoup dependency is present. try: from bs4 import BeautifulSoup, FeatureNotFound HAS_BEAUTIFUL_SOUP = True except ImportError: HAS_BEAUTIFUL_SOUP = False test_defs = [ dict(kwargs=dict(), out="""\ ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """ ), dict(kwargs=dict(delimiter=None), out="""\ ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """ ), dict(kwargs=dict(formats={'XCENTER': '%12.1f', 'YCENTER': '{0:.1f}'}, include_names=['XCENTER', 'YCENTER'], strip_whitespace=False), out="""\ XCENTER YCENTER " 138.5" 256.4 " 18.1" 280.2 """ ), dict(kwargs=dict(Writer=ascii.Rdb, exclude_names=['CHI']), out="""\ ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR N\tN\tN\tN\tN\tN\tN\tN\tN\tS 14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error 18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error """ ), dict(kwargs=dict(Writer=ascii.Tab), out="""\ ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR 14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error 18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error """ ), dict(kwargs=dict(Writer=ascii.Csv), out="""\ ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR 14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error 18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error """ ), dict(kwargs=dict(Writer=ascii.NoHeader), out="""\ 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """ ), dict(kwargs=dict(Writer=ascii.CommentedHeader), out="""\ # ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """ ), dict(kwargs=dict(Writer=ascii.CommentedHeader, comment='&'), out="""\ &ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """ ), dict(kwargs=dict(Writer=ascii.Latex), out="""\ \\begin{table} \\begin{tabular}{ccccccccccc} ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\ & pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\ 14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\ \\end{tabular} \\end{table} """ ), dict(kwargs=dict(Writer=ascii.AASTex), out="""\ \\begin{deluxetable}{ccccccccccc} \\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & 
\\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}} \\startdata 14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\enddata \\end{deluxetable} """ ), dict( kwargs=dict(Writer=ascii.AASTex, caption='Mag values \\label{tab1}', latexdict={ 'units': {'MAG': '[mag]', 'XCENTER': '[pixel]'}, 'tabletype': 'deluxetable*', 'tablealign': 'htpb'}), out="""\ \\begin{deluxetable*}{ccccccccccc}[htpb] \\tablecaption{Mag values \\label{tab1}} \\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}} \\startdata 14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\enddata \\end{deluxetable*} """ ), dict( kwargs=dict(Writer=ascii.Latex, caption='Mag values \\label{tab1}', latexdict={'preamble': '\\begin{center}', 'tablefoot': '\\end{center}', 'data_end': ['\\hline', '\\hline'], 'units':{'MAG': '[mag]', 'XCENTER': '[pixel]'}, 'tabletype': 'table*', 'tablealign': 'h'}, col_align='|lcccccccccc|'), out="""\ \\begin{table*}[h] \\begin{center} \\caption{Mag values \\label{tab1}} \\begin{tabular}{|lcccccccccc|} ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\ & [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\ 14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\ \\hline \\hline \\end{tabular} \\end{center} \\end{table*} """ ), dict(kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts['template']), out="""\ \\begin{tabletype}[tablealign] preamble \\caption{caption} \\begin{tabular}{col_align} header_start ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\ & pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\ header_end data_start 14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\ data_end \\end{tabular} tablefoot \\end{tabletype} """ ), dict(kwargs=dict(Writer=ascii.Latex, latexdict={'tabletype': None}), out="""\ \\begin{tabular}{ccccccccccc} ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\ & pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\ 14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\ \\end{tabular} """ ), dict(kwargs=dict(Writer=ascii.HTML, htmldict={'css': 'table,th,td{border:1px solid black;'}), out="""\ <html> <head> <meta charset="utf-8"/> 
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> <style> table,th,td{border:1px solid black; </style> </head> <body> <table> <thead> <tr> <th>ID</th> <th>XCENTER</th> <th>YCENTER</th> <th>MAG</th> <th>MERR</th> <th>MSKY</th> <th>NITER</th> <th>SHARPNESS</th> <th>CHI</th> <th>PIER</th> <th>PERROR</th> </tr> </thead> <tr> <td>14</td> <td>138.538</td> <td>256.405</td> <td>15.461</td> <td>0.003</td> <td>34.85955</td> <td>4</td> <td>-0.032</td> <td>0.802</td> <td>0</td> <td>No_error</td> </tr> <tr> <td>18</td> <td>18.114</td> <td>280.170</td> <td>22.329</td> <td>0.206</td> <td>30.12784</td> <td>4</td> <td>-2.544</td> <td>1.104</td> <td>0</td> <td>No_error</td> </tr> </table> </body> </html> """ ), dict(kwargs=dict(Writer=ascii.Ipac), out="""\ \\MERGERAD='INDEF' \\IRAF='NOAO/IRAFV2.10EXPORT' \\USER='' \\HOST='tucana' \\DATE='05-28-93' \\TIME='14:46:13' \\PACKAGE='daophot' \\TASK='nstar' \\IMAGE='test' \\GRPFILE='test.psg.1' \\PSFIMAGE='test.psf.1' \\NSTARFILE='test.nst.1' \\REJFILE='"hello world"' \\SCALE='1.' \\DATAMIN='50.' \\DATAMAX='24500.' \\GAIN='1.' \\READNOISE='0.' \\OTIME='00:07:59.0' \\XAIRMASS='1.238106' \\IFILTER='V' \\RECENTER='yes' \\FITSKY='no' \\PSFMAG='16.594' \\PSFRAD='5.' \\FITRAD='3.' \\MAXITER='50' \\MAXGROUP='60' \\FLATERROR='0.75' \\PROFERROR='5.' \\CLIPEXP='6' \\CLIPRANGE='2.5' | ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR| | long| double| double| double| double| double| long| double| double| long| char| | | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors| | null| null| null| null| null| null| null| null| null| null| null| 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """ ), ] test_defs_no_data = [ dict(kwargs=dict(Writer=ascii.Ipac), out="""\ \\ This is an example of a valid comment. 
\\ The 2nd data line is used to verify the exact column parsing \\ (unclear if this is a valid for the IPAC format) \\catalog='sao' \\date='Wed Sp 20 09:48:36 1995' \\mykeyword='Another way for defining keyvalue string' | ra| dec| sai| v2|sptype| |double|double|long|double| char| | unit| unit|unit| unit| ergs| | null| null|null| null| null| """ ), ] tab_to_fill = ['a b c', '1 2 3', '1 1 3'] test_defs_fill_value = [ dict(kwargs=dict(), out="""\ a b c 1 2 3 1 1 3 """ ), dict(kwargs=dict(fill_values=('1', 'w')), out="""\ a b c w 2 3 w w 3 """ ), dict(kwargs=dict(fill_values=('1', 'w', 'b')), out="""\ a b c 1 2 3 1 w 3 """ ), dict(kwargs=dict(fill_values=('1', 'w'), fill_include_names=['b']), out="""\ a b c 1 2 3 1 w 3 """ ), dict(kwargs=dict(fill_values=('1', 'w'), fill_exclude_names=['a']), out="""\ a b c 1 2 3 1 w 3 """ ), dict(kwargs=dict(fill_values=('1', 'w'), fill_include_names=['a'], fill_exclude_names=['a', 'b']), out="""\ a b c 1 2 3 1 1 3 """ ), dict(kwargs=dict(fill_values=[('1', 'w')], formats={'a': '%4.2f'}), out="""\ a b c 1.00 2 3 1.00 w 3 """ ), ] test_def_masked_fill_value = [ dict(kwargs=dict(), out="""\ a b c "" 2 3 1 1 "" """ ), dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'X')]), out="""\ a b c X 2 3 w w X """ ), dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'XXX')], formats={'a': '%4.1f'}), out="""\ a b c XXX 2 3 1.0 w XXX """ ), dict(kwargs=dict(Writer=ascii.Csv), out="""\ a,b,c ,2,3 1,1, """ ), ] def check_write_table(test_def, table, fast_writer): out = StringIO() try: ascii.write(table, out, fast_writer=fast_writer, **test_def['kwargs']) except ValueError as e: # if format doesn't have a fast writer, ignore if 'not in the list of formats with fast writers' not in str(e): raise e return print('Expected:\n{}'.format(test_def['out'])) print('Actual:\n{}'.format(out.getvalue())) assert [x.strip() for x in out.getvalue().strip().splitlines()] == [ x.strip() for x in test_def['out'].strip().splitlines()] def check_write_table_via_table(test_def, table, fast_writer): out = StringIO() test_def = copy.deepcopy(test_def) if 'Writer' in test_def['kwargs']: format = 'ascii.{0}'.format(test_def['kwargs']['Writer']._format_name) del test_def['kwargs']['Writer'] else: format = 'ascii' try: table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs']) except ValueError as e: # if format doesn't have a fast writer, ignore if 'not in the list of formats with fast writers' not in str(e): raise e return print('Expected:\n{}'.format(test_def['out'])) print('Actual:\n{}'.format(out.getvalue())) assert [x.strip() for x in out.getvalue().strip().splitlines()] == [ x.strip() for x in test_def['out'].strip().splitlines()] @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_table(fast_writer): table = ascii.get_reader(Reader=ascii.Daophot) data = table.read('t/daophot.dat') for test_def in test_defs: check_write_table(test_def, data, fast_writer) check_write_table_via_table(test_def, data, fast_writer) @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_fill_values(fast_writer): data = ascii.read(tab_to_fill) for test_def in test_defs_fill_value: check_write_table(test_def, data, fast_writer) @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_fill_masked_different(fast_writer): '''see discussion in #2255''' data = ascii.read(tab_to_fill) data = table.Table(data, masked=True) data['a'].mask = [True, False] data['c'].mask = [False, True] for test_def in test_def_masked_fill_value: 
check_write_table(test_def, data, fast_writer) @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_no_data_ipac(fast_writer): """Write an IPAC table that contains no data.""" table = ascii.get_reader(Reader=ascii.Ipac) data = table.read('t/no_data_ipac.dat') for test_def in test_defs_no_data: check_write_table(test_def, data, fast_writer) check_write_table_via_table(test_def, data, fast_writer) def test_write_invalid_toplevel_meta_ipac(): """Write an IPAC table that contains no data but has invalid (incorrectly specified) metadata stored in the top-level metadata and therefore should raise a warning, and check that the warning has been raised""" table = ascii.get_reader(Reader=ascii.Ipac) data = table.read('t/no_data_ipac.dat') data.meta['blah'] = 'extra' with catch_warnings(AstropyWarning) as ASwarn: out = StringIO() data.write(out, format='ascii.ipac') assert len(ASwarn) == 1 assert "were not written" in str(ASwarn[0].message) def test_write_invalid_keyword_meta_ipac(): """Write an IPAC table that contains no data but has invalid (incorrectly specified) metadata stored appropriately in the ``keywords`` section of the metadata but with invalid format and therefore should raise a warning, and check that the warning has been raised""" table = ascii.get_reader(Reader=ascii.Ipac) data = table.read('t/no_data_ipac.dat') data.meta['keywords']['blah'] = 'invalid' with catch_warnings(AstropyWarning) as ASwarn: out = StringIO() data.write(out, format='ascii.ipac') assert len(ASwarn) == 1 assert "has been skipped" in str(ASwarn[0].message) def test_write_valid_meta_ipac(): """Write an IPAC table that contains no data and has *correctly* specified metadata. No warnings should be issued""" table = ascii.get_reader(Reader=ascii.Ipac) data = table.read('t/no_data_ipac.dat') data.meta['keywords']['blah'] = {'value': 'invalid'} with catch_warnings(AstropyWarning) as ASwarn: out = StringIO() data.write(out, format='ascii.ipac') assert len(ASwarn) == 0 @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_comments(fast_writer): """Write comments in output originally read by io.ascii.""" data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1,2,3') out = StringIO() ascii.write(data, out, format='basic', fast_writer=fast_writer) expected = ['# c1', '# c2', '# c3', 'a b c', '1 2 3'] assert out.getvalue().splitlines() == expected # header comes before comments for commented-header out = StringIO() ascii.write(data, out, format='commented_header', fast_writer=fast_writer) expected = ['# a b c', '# c1', '# c2', '# c3', '1 2 3'] assert out.getvalue().splitlines() == expected # setting comment=False should disable comment writing out = StringIO() ascii.write(data, out, format='basic', comment=False, fast_writer=fast_writer) expected = ['a b c', '1 2 3'] assert out.getvalue().splitlines() == expected @pytest.mark.parametrize("fast_writer", [True, False]) @pytest.mark.parametrize("fmt", ['%0.1f', '.1f', '0.1f', '{0:0.1f}']) def test_write_format(fast_writer, fmt): """Check different formats for a column.""" data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33') out = StringIO() expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33'] data['a'].format = fmt ascii.write(data, out, format='basic', fast_writer=fast_writer) assert out.getvalue().splitlines() == expected @pytest.mark.parametrize("fast_writer", [True, False]) def test_strip_names(fast_writer): """Names should be stripped of whitespace by default.""" data = table.Table([[1], [2], [3]], names=(' A', 'B ', ' C ')) out = 
StringIO() ascii.write(data, out, format='csv', fast_writer=fast_writer) assert out.getvalue().splitlines()[0] == 'A,B,C' def test_latex_units(): """ Check to make sure that Latex and AASTex writers attempt to fall back on the **unit** attribute of **Column** if the supplied **latexdict** does not specify units. """ t = table.Table([table.Column(name='date', data=['a', 'b']), table.Column(name='NUV exp.time', data=[1, 2])]) latexdict = copy.deepcopy(ascii.latexdicts['AA']) latexdict['units'] = {'NUV exp.time': 's'} out = StringIO() expected = '''\ \\begin{table}{cc} \\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}} \\startdata a & 1 \\\\ b & 2 \\enddata \\end{table} '''.replace('\n', os.linesep) ascii.write(t, out, format='aastex', latexdict=latexdict) assert out.getvalue() == expected # use unit attribute instead t['NUV exp.time'].unit = units.s t['date'].unit = units.yr out = StringIO() ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA']) assert out.getvalue() == expected.replace( 'colhead{s}', r'colhead{$\mathrm{s}$}').replace( 'colhead{ }', r'colhead{$\mathrm{yr}$}') @pytest.mark.parametrize("fast_writer", [True, False]) def test_commented_header_comments(fast_writer): """ Test the fix for #3562 with confusing exception using comment=False for the commented_header writer. """ t = table.Table([[1, 2]]) with pytest.raises(ValueError) as err: out = StringIO() ascii.write(t, out, format='commented_header', comment=False, fast_writer=fast_writer) assert "for the commented_header writer you must supply a string" in str(err.value) @pytest.mark.parametrize("fast_writer", [True, False]) def test_byte_string_output(fast_writer): """ Test the fix for #4350 where byte strings were output with a leading `b` on Py3. """ t = table.Table([['Hello', 'World']], dtype=['S10']) out = StringIO() ascii.write(t, out, fast_writer=fast_writer) assert out.getvalue().splitlines() == ['col0', 'Hello', 'World'] @pytest.mark.parametrize('names, include_names, exclude_names, formats, issues_warning', [ (['x', 'y'], ['x', 'y'], ['x'], {'x': '%d', 'y': '%f'}, True), (['x', 'y'], ['x', 'y'], ['y'], {'x': '%d'}, False), (['x', 'y'], ['x', 'y'], [], {'p': '%d', 'q': '%f'}, True), (['x', 'y'], ['x', 'y'], [], {'z': '%f'}, True), (['x', 'y'], ['x', 'y'], [], {'x': '%d'}, False), (['x', 'y'], ['x', 'y'], [], {'p': '%d', 'y': '%f'}, True), (['x', 'y'], ['x', 'y'], [], {}, False) ]) def test_names_with_formats(names, include_names, exclude_names, formats, issues_warning): """Test for #4508.""" t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]]) with catch_warnings(AstropyWarning) as ASwarn: out = StringIO() ascii.write(t, out, names=names, include_names=include_names, exclude_names=exclude_names, formats=formats) assert (issues_warning == (len(ASwarn) == 1)) @pytest.mark.parametrize('formats, issues_warning', [ ({'p': '%d', 'y': '%f'}, True), ({'x': '%d', 'y': '%f'}, True), ({'z': '%f'}, True), ({}, False) ]) def test_columns_names_with_formats(formats, issues_warning): """Test the fix for #4508.""" t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]]) with catch_warnings(AstropyWarning) as ASwarn: out = StringIO() ascii.write(t, out, formats=formats) assert (issues_warning == (len(ASwarn) == 1)) @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_quoted_empty_field(fast_writer): """ Test the fix for #4350 where byte strings were output with a leading `b` on Py3. 
""" t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10']) out = StringIO() ascii.write(t, out, fast_writer=fast_writer) assert out.getvalue().splitlines() == ['col0 col1', 'Hello ""', '"" ""'] out = StringIO() ascii.write(t, out, fast_writer=fast_writer, delimiter=',') assert out.getvalue().splitlines() == ['col0,col1', 'Hello,', ','] @pytest.mark.parametrize("format", ['ascii', 'csv', 'html', 'latex', 'ascii.fixed_width', 'html']) @pytest.mark.parametrize("fast_writer", [True, False]) def test_write_overwrite_ascii(format, fast_writer, tmpdir): """Test overwrite argument for various ASCII writers""" filename = tmpdir.join("table-tmp.dat").strpath open(filename, 'w').close() t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10']) with pytest.raises(OSError) as err: t.write(filename, overwrite=False, format=format, fast_writer=fast_writer) assert str(err.value).endswith('already exists') with catch_warnings(AstropyDeprecationWarning) as warning: t.write(filename, format=format, fast_writer=fast_writer) assert len(warning) == 1 assert str(warning[0].message).endswith( "Automatically overwriting ASCII files is deprecated. " "Use the argument 'overwrite=True' in the future.") t.write(filename, overwrite=True, format=format, fast_writer=fast_writer) # If the output is a file object, overwrite is ignored with open(filename, 'w') as fp: t.write(fp, format=format, fast_writer=fast_writer) t.write(fp, overwrite=False, format=format, fast_writer=fast_writer) t.write(fp, overwrite=True, format=format, fast_writer=fast_writer) fmt_name_classes = list(chain(ascii.core.FAST_CLASSES.items(), ascii.core.FORMAT_CLASSES.items())) @pytest.mark.parametrize("fmt_name_class", fmt_name_classes) def test_roundtrip_masked(fmt_name_class): """ Round trip a simple masked table through every writable format and confirm that reading back gives the same result. """ fmt_name, fmt_cls = fmt_name_class if not getattr(fmt_cls, '_io_registry_can_write', True): return # Skip tests for fixed_width or HTML without bs4 if ((fmt_name == 'html' and not HAS_BEAUTIFUL_SOUP) or fmt_name == 'fixed_width'): return t = simple_table(masked=True) out = StringIO() fast = fmt_name in ascii.core.FAST_CLASSES try: ascii.write(t, out, format=fmt_name, fast_writer=fast) except ImportError: # Some failed dependency, e.g. PyYAML, skip test return # No-header formats need to be told the column names kwargs = {'names': t.colnames} if 'no_header' in fmt_name else {} t2 = ascii.read(out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs) assert t.colnames == t2.colnames for col, col2 in zip(t.itercols(), t2.itercols()): assert col.dtype.kind == col2.dtype.kind assert np.all(col == col2)
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os import pytest from ....table import Table, Column from ....table.table_helpers import simple_table import numpy as np ROOT = os.path.abspath(os.path.dirname(__file__)) files = ['t/cds.dat', 't/ipac.dat', 't/daophot.dat', 't/latex1.tex', 't/simple_csv.csv'] # Check to see if the BeautifulSoup dependency is present. try: from bs4 import BeautifulSoup # pylint: disable=W0611 HAS_BEAUTIFUL_SOUP = True except ImportError: HAS_BEAUTIFUL_SOUP = False try: import yaml HAS_YAML = True except ImportError: HAS_YAML = False if HAS_BEAUTIFUL_SOUP: files.append('t/html.html') @pytest.mark.parametrize('filename', files) def test_read_generic(filename): Table.read(os.path.join(ROOT, filename), format='ascii') def test_write_generic(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) t.write(str(tmpdir.join("test")), format='ascii') def test_read_ipac(): Table.read(os.path.join(ROOT, 't/ipac.dat'), format='ipac') def test_read_cds(): Table.read(os.path.join(ROOT, 't/cds.dat'), format='cds') def test_read_dapphot(): Table.read(os.path.join(ROOT, 't/daophot.dat'), format='daophot') def test_read_latex(): Table.read(os.path.join(ROOT, 't/latex1.tex'), format='latex') def test_read_latex_noformat(): Table.read(os.path.join(ROOT, 't/latex1.tex')) def test_write_latex(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.tex")) t.write(path, format='latex') def test_write_latex_noformat(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.tex")) t.write(path) @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_read_html(): Table.read(os.path.join(ROOT, 't/html.html'), format='html') @pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_read_html_noformat(): Table.read(os.path.join(ROOT, 't/html.html')) def test_write_html(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.html")) t.write(path, format='html') def test_write_html_noformat(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.html")) t.write(path) def test_read_rdb(): Table.read(os.path.join(ROOT, 't/short.rdb'), format='rdb') def test_read_rdb_noformat(): Table.read(os.path.join(ROOT, 't/short.rdb')) def test_write_rdb(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.rdb")) t.write(path, format='rdb') def test_write_rdb_noformat(tmpdir): t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.rdb")) t.write(path) def test_read_csv(): '''If properly registered, filename should be sufficient to specify format #3189 ''' Table.read(os.path.join(ROOT, 't/simple_csv.csv')) def test_write_csv(tmpdir): '''If properly registered, filename should be sufficient to specify format #3189 ''' t = Table() t.add_column(Column(name='a', data=[1, 2, 3])) t.add_column(Column(name='b', data=['a', 'b', 'c'])) path = str(tmpdir.join("data.csv")) t.write(path) @pytest.mark.skipif('not HAS_YAML') def test_auto_identify_ecsv(tmpdir): tbl = simple_table() tmpfile = 
str(tmpdir.join('/tmpFile.ecsv')) tbl.write(tmpfile) tbl2 = Table.read(tmpfile) assert np.all(tbl == tbl2)
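# A minimal sketch (not part of the test suite) of the registry behaviour the
# tests above rely on: once the ascii formats are registered, the filename
# suffix alone usually selects the reader and writer.  'out.csv' is a
# placeholder path under pytest's tmpdir fixture.
def _example_roundtrip_by_suffix(tmpdir):
    from astropy.table import Table, Column

    t = Table()
    t.add_column(Column(name='a', data=[1, 2, 3]))
    t.add_column(Column(name='b', data=['x', 'y', 'z']))

    path = str(tmpdir.join('out.csv'))
    t.write(path)            # format inferred from the .csv suffix
    return Table.read(path)  # and again when reading it back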
# Licensed under a 3-clause BSD style license - see LICENSE.rst from ... import ascii from .common import (assert_equal, assert_almost_equal, has_isnan, setup_function, teardown_function) def read_table1(readme, data): reader = ascii.Cds(readme) return reader.read(data) def read_table2(readme, data): reader = ascii.get_reader(Reader=ascii.Cds, readme=readme) reader.outputter = ascii.TableOutputter() return reader.read(data) def read_table3(readme, data): return ascii.read(data, readme=readme) def test_description(): readme = 't/cds/description/ReadMe' data = 't/cds/description/table.dat' for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 2) assert_equal(table['Cluster'].description, 'Cluster name') assert_equal(table['Star'].description, '') assert_equal(table['Wave'].description, 'wave? Wavelength in Angstroms') assert_equal(table['El'].description, 'a') assert_equal(table['ion'].description, '- Ionization stage (1 for neutral element)') assert_equal(table['EW'].description, 'Equivalent width (in mA)') assert_equal(table['Q'].description, 'DAOSPEC quality parameter Q(large values are bad)') def test_multi_header(): readme = 't/cds/multi/ReadMe' data = 't/cds/multi/lhs2065.dat' for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 18) assert_almost_equal(table['Lambda'][-1], 6479.32) assert_equal(table['Fnu'][-1], '0.285937') data = 't/cds/multi/lp944-20.dat' for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 18) assert_almost_equal(table['Lambda'][0], 6476.09) assert_equal(table['Fnu'][-1], '0.489005') def test_glob_header(): readme = 't/cds/glob/ReadMe' data = 't/cds/glob/lmxbrefs.dat' for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 291) assert_equal(table['Name'][-1], 'J1914+0953') assert_equal(table['BibCode'][-2], '2005A&A...432..235R') def test_header_from_readme(): r = ascii.Cds("t/vizier/ReadMe") table = r.read("t/vizier/table1.dat") assert len(r.data.data_lines) == 15 assert len(table) == 15 assert len(table.keys()) == 18 Bmag = [14.79, 15.00, 14.80, 12.38, 12.36, 12.24, 13.75, 13.65, 13.41, 11.59, 11.68, 11.53, 13.92, 14.03, 14.18] for i, val in enumerate(table.field('Bmag')): assert val == Bmag[i] table = r.read("t/vizier/table5.dat") assert len(r.data.data_lines) == 49 assert len(table) == 49 assert len(table.keys()) == 10 Q = [0.289, 0.325, 0.510, 0.577, 0.539, 0.390, 0.957, 0.736, 1.435, 1.117, 1.473, 0.808, 1.416, 2.209, 0.617, 1.046, 1.604, 1.419, 1.431, 1.183, 1.210, 1.005, 0.706, 0.665, 0.340, 0.323, 0.391, 0.280, 0.343, 0.369, 0.495, 0.828, 1.113, 0.499, 1.038, 0.260, 0.863, 1.638, 0.479, 0.232, 0.627, 0.671, 0.371, 0.851, 0.607, -9.999, 1.958, 1.416, 0.949] if has_isnan: from .common import isnan for i, val in enumerate(table.field('Q')): if isnan(val): # text value for a missing value in that table assert Q[i] == -9.999 else: assert val == Q[i] if __name__ == "__main__": # run from main directory; not from test/ test_header_from_readme() test_multi_header() test_glob_header() test_description()
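# A minimal sketch (not part of the test suite) showing two of the equivalent
# ways of attaching a CDS/Vizier ReadMe that the tests above exercise, using
# the bundled 't/vizier' data files.
def _example_read_cds_with_readme():
    from astropy.io import ascii

    readme = 't/vizier/ReadMe'
    data = 't/vizier/table1.dat'

    t1 = ascii.read(data, readme=readme)    # high-level interface
    reader = ascii.get_reader(Reader=ascii.Cds, readme=readme)
    t2 = reader.read(data)                  # explicit reader object
    return t1, t2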
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO from ... import ascii from .common import (assert_equal, assert_almost_equal) def assert_equal_splitlines(arg1, arg2): assert_equal(arg1.splitlines(), arg2.splitlines()) def test_read_normal(): """Normal SimpleRST Table""" table = """ # comment (with blank line above) ======= ========= Col1 Col2 ======= ========= 1.2 "hello" 2.4 's worlds ======= ========= """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames, ['Col1', 'Col2']) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hello"') assert_equal(dat[1][1], "'s worlds") def test_read_normal_names(): """Normal SimpleRST Table with provided column names""" table = """ # comment (with blank line above) ======= ========= Col1 Col2 ======= ========= 1.2 "hello" 2.4 's worlds ======= ========= """ reader = ascii.get_reader(Reader=ascii.RST, names=('name1', 'name2')) dat = reader.read(table) assert_equal(dat.colnames, ['name1', 'name2']) assert_almost_equal(dat[1][0], 2.4) def test_read_normal_names_include(): """Normal SimpleRST Table with provided column names""" table = """ # comment (with blank line above) ======= ========== ====== Col1 Col2 Col3 ======= ========== ====== 1.2 "hello" 3 2.4 's worlds 7 ======= ========== ====== """ reader = ascii.get_reader(Reader=ascii.RST, names=('name1', 'name2', 'name3'), include_names=('name1', 'name3')) dat = reader.read(table) assert_equal(dat.colnames, ['name1', 'name3']) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], 3) def test_read_normal_exclude(): """Nice, typical SimpleRST table with col name excluded""" table = """ ======= ========== Col1 Col2 ======= ========== 1.2 "hello" 2.4 's worlds ======= ========== """ reader = ascii.get_reader(Reader=ascii.RST, exclude_names=('Col1',)) dat = reader.read(table) assert_equal(dat.colnames, ['Col2']) assert_equal(dat[1][0], "'s worlds") def test_read_unbounded_right_column(): """The right hand column should be allowed to overflow""" table = """ # comment (with blank line above) ===== ===== ==== Col1 Col2 Col3 ===== ===== ==== 1.2 2 Hello 2.4 4 Worlds ===== ===== ==== """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat[0][2], "Hello") assert_equal(dat[1][2], "Worlds") def test_read_unbounded_right_column_header(): """The right hand column should be allowed to overflow""" table = """ # comment (with blank line above) ===== ===== ==== Col1 Col2 Col3Long ===== ===== ==== 1.2 2 Hello 2.4 4 Worlds ===== ===== ==== """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames[-1], "Col3Long") def test_read_right_indented_table(): """We should be able to read right indented tables correctly""" table = """ # comment (with blank line above) ==== ==== ==== Col1 Col2 Col3 ==== ==== ==== 3 3.4 foo 1 4.5 bar ==== ==== ==== """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames, ["Col1", "Col2", "Col3"]) assert_equal(dat[0][2], "foo") assert_equal(dat[1][0], 1) def test_trailing_spaces_in_row_definition(): """ Trailing spaces in the row definition column shouldn't matter""" table = ( "\n" "# comment (with blank line above)\n" " ==== ==== ==== \n" " Col1 Col2 Col3\n" " ==== ==== ==== \n" " 3 3.4 foo\n" " 1 4.5 bar\n" " ==== ==== ==== \n" ) # make sure no one accidentally deletes the trailing whitespaces in the # table. 
assert len(table) == 151 reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames, ["Col1", "Col2", "Col3"]) assert_equal(dat[0][2], "foo") assert_equal(dat[1][0], 1) table = """\ ====== =========== ============ =========== Col1 Col2 Col3 Col4 ====== =========== ============ =========== 1.2 "hello" 1 a 2.4 's worlds 2 2 ====== =========== ============ =========== """ dat = ascii.read(table, Reader=ascii.RST) def test_write_normal(): """Write a table as a normal SimpleRST Table""" out = StringIO() ascii.write(dat, out, Writer=ascii.RST) assert_equal_splitlines(out.getvalue(), """\ ==== ========= ==== ==== Col1 Col2 Col3 Col4 ==== ========= ==== ==== 1.2 "hello" 1 a 2.4 's worlds 2 2 ==== ========= ==== ==== """)
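# A minimal sketch (not part of the test suite): a write/read round trip
# through the RST writer and reader exercised above, on a throwaway table.
def _example_rst_roundtrip():
    from io import StringIO
    from astropy.io import ascii
    from astropy.table import Table

    t = Table([[1.2, 2.4], ['foo', 'bar']], names=('Col1', 'Col2'))
    out = StringIO()
    ascii.write(t, out, Writer=ascii.RST)
    # The rendered reStructuredText table can be read straight back in.
    return ascii.read(out.getvalue(), Reader=ascii.RST)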
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO import numpy as np from ... import ascii from .common import assert_equal def test_types_from_dat(): converters = {'a': [ascii.convert_numpy(float)], 'e': [ascii.convert_numpy(str)]} dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'], Reader=ascii.Basic, converters=converters) assert dat['a'].dtype.kind == 'f' assert dat['b'].dtype.kind == 'i' assert dat['c'].dtype.kind in ('S', 'U') assert dat['d'].dtype.kind == 'f' assert dat['e'].dtype.kind in ('S', 'U') def test_rdb_write_types(): dat = ascii.read(['a b c d', '1 1.0 cat 2.1'], Reader=ascii.Basic) out = StringIO() ascii.write(dat, out, Writer=ascii.Rdb) outs = out.getvalue().splitlines() assert_equal(outs[1], 'N\tN\tS\tN') def test_ipac_read_types(): table = r"""\ | ra | dec | sai |-----v2---| sptype | | real | float | l | real | char | | unit | unit | unit | unit | ergs | | null | null | null | null | -999 | 2.09708 2956 73765 2.06000 B8IVpMnHg """ reader = ascii.get_reader(Reader=ascii.Ipac) dat = reader.read(table) types = [ascii.FloatType, ascii.FloatType, ascii.IntType, ascii.FloatType, ascii.StrType] for (col, expected_type) in zip(reader.cols, types): assert_equal(col.type, expected_type)
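# A minimal sketch (not part of the test suite) of the converters mechanism
# tested above; the column names 'name' and 'flux' are placeholders.
def _example_force_column_types():
    from astropy.io import ascii

    converters = {'flux': [ascii.convert_numpy(float)],
                  'name': [ascii.convert_numpy(str)]}
    # Without the converters, 'flux' would be read as an integer column.
    return ascii.read(['name flux', 'a 1', 'b 2'], converters=converters)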
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """ The **asdf** subpackage contains code that is used to serialize astropy types so that they can be represented and stored using the Advanced Scientific Data Format (ASDF). This subpackage defines classes, referred to as **tags**, that implement the logic for serialization and deserialization. ASDF makes use of abstract data type definitions called **schemas**. The tags provided here are simply specific implementations of particular schemas. Currently astropy only implements tags for a subset of schemas that are defined externally by the ASDF Standard. However, it is likely that astropy will eventually define schemas of its own. Astropy currently has no ability to read or write ASDF files itself. In order to process ASDF files it is necessary to make use of the standalone **asdf** package. Users should never need to refer to tag implementations directly. Their presence should be entirely transparent when processing ASDF files. If both **asdf** and **astropy** are installed, no further configuration is required in order to process ASDF files. The **asdf** package has been designed to automatically detect the presence of the tags defined by **astropy**. Documentation on the ASDF Standard can be found `here <https://asdf-standard.readthedocs.io>`__. Documentation on the ASDF Python module can be found `here <https://asdf.readthedocs.io>`__. """
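# A minimal sketch (not shipped with astropy, assuming the standalone asdf
# package is installed): round-tripping an astropy Quantity through an ASDF
# file.  The file name 'example.asdf' is a placeholder.
def _example_asdf_roundtrip():
    import asdf
    from astropy import units as u

    tree = {'distance': 26.2 * u.km}
    asdf.AsdfFile(tree).write_to('example.asdf')
    with asdf.open('example.asdf') as af:
        return af.tree['distance']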
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import six from asdf.asdftypes import CustomType, ExtensionTypeMeta __all__ = ['AstropyType', 'AstropyAsdfType'] _astropy_types = set() _astropy_asdf_types = set() class AstropyTypeMeta(ExtensionTypeMeta): """ Keeps track of `AstropyType` subclasses that are created so that they can be stored automatically by astropy extensions for ASDF. """ def __new__(mcls, name, bases, attrs): cls = super(AstropyTypeMeta, mcls).__new__(mcls, name, bases, attrs) # Classes using this metaclass are automatically added to the list of # astropy extensions if cls.organization == 'astropy.org' and cls.standard == 'astropy': _astropy_types.add(cls) elif cls.organization == 'stsci.edu' and cls.standard == 'asdf': _astropy_asdf_types.add(cls) return cls @six.add_metaclass(AstropyTypeMeta) class AstropyType(CustomType): """ This class represents types that have schemas and tags that are defined by Astropy. IMPORTANT: This parent class should **not** be used for types that have schemas that are defined by the ASDF standard. """ organization = 'astropy.org' standard = 'astropy' @six.add_metaclass(AstropyTypeMeta) class AstropyAsdfType(CustomType): """ This class represents types that have schemas that are defined in the ASDF standard, but have tags that are implemented within astropy. IMPORTANT: This parent class should **not** be used for types that also have schemas that are defined by astropy. """ organization = 'stsci.edu' standard = 'asdf'
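# A hypothetical sketch (not an actual astropy tag): what a serialization tag
# built on AstropyType might look like.  _ExampleThing, its 'value' attribute
# and the 'example/thing' schema name are invented for illustration only; the
# real tag implementations live in the tags subpackage.
class _ExampleThing:
    def __init__(self, value):
        self.value = value


class _ExampleThingType(AstropyType):
    name = 'example/thing'     # hypothetical tag under astropy.org/astropy
    version = '1.0.0'
    types = [_ExampleThing]

    @classmethod
    def to_tree(cls, node, ctx):
        # Reduce the custom object to a tree of basic types for storage.
        return {'value': node.value}

    @classmethod
    def from_tree(cls, tree, ctx):
        # Rebuild the custom object from the stored tree.
        return _ExampleThing(tree['value'])

# Because AstropyType uses the AstropyTypeMeta metaclass defined above, simply
# defining such a subclass adds it to _astropy_types for the astropy extension.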
# Licensed under a 3-clause BSD style license import os def get_package_data(): # Installs the schema files schemas = [] root = os.path.join(os.path.dirname(__file__), 'schemas') for node, dirs, files in os.walk(root): for fname in files: if fname.endswith('.yaml'): schemas.append( os.path.relpath(os.path.join(node, fname), root)) # In the package directory, install to the subdirectory 'schemas' schemas = [os.path.join('schemas', s) for s in schemas] return {'astropy.io.misc.asdf': schemas}
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- import os from asdf.extension import AsdfExtension, BuiltinExtension from asdf.resolver import Resolver, DEFAULT_URL_MAPPING from asdf.util import filepath_to_url # Make sure that all tag implementations are imported by the time we create # the extension class so that _astropy_asdf_types is populated correctly. We # could do this using __init__ files, except it causes pytest import errors in # the case that asdf is not installed. from .tags.coordinates.frames import * from .tags.fits.fits import * from .tags.table.table import * from .tags.time.time import * from .tags.transform.basic import * from .tags.transform.compound import * from .tags.transform.polynomial import * from .tags.transform.projections import * from .tags.transform.tabular import * from .tags.unit.quantity import * from .tags.unit.unit import * from .types import _astropy_types, _astropy_asdf_types __all__ = ['AstropyExtension', 'AstropyAsdfExtension'] ASTROPY_SCHEMA_URI_BASE = 'http://astropy.org/schemas/' SCHEMA_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), 'schemas')) ASTROPY_URL_MAPPING = [ (ASTROPY_SCHEMA_URI_BASE, filepath_to_url( os.path.join(SCHEMA_PATH, 'astropy.org')) + '/{url_suffix}.yaml')] # This extension is used to register custom types that have both tags and # schemas defined by Astropy. class AstropyExtension(AsdfExtension): @property def types(self): return _astropy_types @property def tag_mapping(self): return [('tag:astropy.org:astropy', ASTROPY_SCHEMA_URI_BASE + 'astropy{tag_suffix}')] @property def url_mapping(self): return ASTROPY_URL_MAPPING # This extension is used to register custom tag types that have schemas defined # by ASDF, but have tag implementations defined in astropy. class AstropyAsdfExtension(BuiltinExtension): @property def types(self): return _astropy_asdf_types
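# A minimal sketch (for illustration only, mirroring the mappings defined
# above) of how an example tag is resolved first to a schema URI and then to
# a schema file on disk.  The tag used here is just an example; the real
# url_mapping goes through filepath_to_url and yields a file:// URL rather
# than the plain path built below.
def _example_resolve_tag():
    tag = 'tag:astropy.org:astropy/table/table-1.0.0'
    prefix = 'tag:astropy.org:astropy'

    # tag_mapping: 'tag:astropy.org:astropy{tag_suffix}' ->
    #              'http://astropy.org/schemas/astropy{tag_suffix}'
    schema_uri = ASTROPY_SCHEMA_URI_BASE + 'astropy' + tag[len(prefix):]

    # url_mapping: 'http://astropy.org/schemas/{url_suffix}' ->
    #              '<SCHEMA_PATH>/astropy.org/{url_suffix}.yaml'
    url_suffix = schema_uri[len(ASTROPY_SCHEMA_URI_BASE):]
    schema_file = os.path.join(SCHEMA_PATH, 'astropy.org', url_suffix + '.yaml')

    return schema_uri, schema_file

# asdf detects the astropy extensions automatically (as described in the
# subpackage docstring), so users normally never instantiate these classes.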
8cefef05a2074a686a99a409f7a37bbe48bafa4d7f6236bfc5972d83e662f354
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to YAML serialization.

Requires `pyyaml <http://pyyaml.org/>`_ to be installed.
"""

from io import StringIO

import pytest
import numpy as np

from ....coordinates import SkyCoord, EarthLocation, Angle, Longitude, Latitude
from .... import units as u
from ....time import Time
from ....table import QTable, SerializedColumn

try:
    from ..yaml import load, load_all, dump
    HAS_YAML = True
except ImportError:
    HAS_YAML = False

pytestmark = pytest.mark.skipif('not HAS_YAML')


@pytest.mark.parametrize('c', [True, np.uint8(8), np.int16(4), np.int32(1),
                               np.int64(3), np.int64(2**63 - 1), 2.0,
                               np.float64(), 3+4j, np.complex_(3 + 4j),
                               np.complex64(3 + 4j),
                               np.complex128(1. - 2**-52 + 1j * (1. - 2**-52))])
def test_numpy_types(c):
    cy = load(dump(c))
    assert c == cy


@pytest.mark.parametrize('c', [u.m, u.m / u.s, u.hPa, u.dimensionless_unscaled])
def test_unit(c):
    cy = load(dump(c))
    if isinstance(c, u.CompositeUnit):
        assert c == cy
    else:
        assert c is cy


@pytest.mark.parametrize('c', [Angle('1 2 3', unit='deg'),
                               Longitude('1 2 3', unit='deg'),
                               Latitude('1 2 3', unit='deg'),
                               [[1], [3]] * u.m,
                               np.array([[1, 2], [3, 4]], order='F'),
                               np.array([[1, 2], [3, 4]], order='C'),
                               np.array([1, 2, 3, 4])[::2]])
def test_ndarray_subclasses(c):
    cy = load(dump(c))

    assert np.all(c == cy)
    assert c.shape == cy.shape
    assert type(c) is type(cy)

    cc = 'C_CONTIGUOUS'
    fc = 'F_CONTIGUOUS'
    if c.flags[cc] or c.flags[fc]:
        assert c.flags[cc] == cy.flags[cc]
        assert c.flags[fc] == cy.flags[fc]
    else:
        # Original was not contiguous but round-trip version
        # should be c-contig.
        assert cy.flags[cc]

    if hasattr(c, 'unit'):
        assert c.unit == cy.unit


def compare_coord(c, cy):
    assert c.shape == cy.shape
    assert c.frame.name == cy.frame.name

    assert list(c.get_frame_attr_names()) == list(cy.get_frame_attr_names())
    for attr in c.get_frame_attr_names():
        assert getattr(c, attr) == getattr(cy, attr)

    assert (list(c.representation_component_names) ==
            list(cy.representation_component_names))
    for name in c.representation_component_names:
        assert np.all(getattr(c, name) == getattr(cy, name))


@pytest.mark.parametrize('frame', ['fk4', 'altaz'])
def test_skycoord(frame):
    c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]], unit='deg', frame=frame,
                 obstime=Time('2016-01-02'),
                 location=EarthLocation(1000, 2000, 3000, unit=u.km))
    cy = load(dump(c))
    compare_coord(c, cy)


def _get_time():
    t = Time([[1], [2]], format='cxcsec',
             location=EarthLocation(1000, 2000, 3000, unit=u.km))
    t.format = 'iso'
    t.precision = 5
    t.delta_ut1_utc = np.array([[3.0], [4.0]])
    t.delta_tdb_tt = np.array([[5.0], [6.0]])
    t.out_subfmt = 'date_hm'

    return t


def compare_time(t, ty):
    assert type(t) is type(ty)
    assert np.all(t == ty)
    for attr in ('shape', 'jd1', 'jd2', 'format', 'scale', 'precision',
                 'in_subfmt', 'out_subfmt', 'location', 'delta_ut1_utc',
                 'delta_tdb_tt'):
        assert np.all(getattr(t, attr) == getattr(ty, attr))


def test_time():
    t = _get_time()
    ty = load(dump(t))
    compare_time(t, ty)


def test_timedelta():
    t = _get_time()
    dt = t - t + 0.1234556 * u.s
    dty = load(dump(dt))

    assert type(dt) is type(dty)
    for attr in ('shape', 'jd1', 'jd2', 'format', 'scale'):
        assert np.all(getattr(dt, attr) == getattr(dty, attr))


def test_serialized_column():
    sc = SerializedColumn({'name': 'hello', 'other': 1, 'other2': 2.0})
    scy = load(dump(sc))
    assert sc == scy


def test_load_all():
    t = _get_time()
    unit = u.m / u.s
    c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]], unit='deg',
                 frame='fk4', obstime=Time('2016-01-02'),
                 location=EarthLocation(1000, 2000, 3000, unit=u.km))

    # Make a multi-document stream
    out = ('---\n' + dump(t) +
           '---\n' + dump(unit) +
           '---\n' + dump(c))

    ty, unity, cy = list(load_all(out))

    compare_time(t, ty)
    compare_coord(c, cy)
    assert unity == unit


@pytest.mark.skipif('not HAS_YAML')
def test_ecsv_astropy_objects_in_meta():
    """
    Test that astropy core objects in ``meta`` are serialized.
    """
    t = QTable([[1, 2] * u.m, [4, 5]], names=['a', 'b'])
    tm = _get_time()
    c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]], unit='deg',
                 frame='fk4', obstime=Time('2016-01-02'),
                 location=EarthLocation(1000, 2000, 3000, unit=u.km))
    unit = u.m / u.s

    t.meta = {'tm': tm, 'c': c, 'unit': unit}
    out = StringIO()
    t.write(out, format='ascii.ecsv')

    t2 = QTable.read(out.getvalue(), format='ascii.ecsv')

    compare_time(tm, t2.meta['tm'])
    compare_coord(c, t2.meta['c'])
    assert t2.meta['unit'] == unit
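# Minimal round-trip sketch (illustration only, not one of the tests above),
# assuming pyyaml is installed: the same dump/load helpers exercised by these
# tests can be used directly on astropy objects.
if __name__ == '__main__':  # pragma: no cover
    from astropy.time import Time
    from astropy.io.misc.yaml import dump, load

    t = Time('2016-01-02', scale='utc')
    assert load(dump(t)) == t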
3c203b64e68b845006c21965994ad1025dd5acfce9efb5c9ca7b9a95f0af570d
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest
import numpy as np

from ....tests.helper import catch_warnings
from ....table import Table, QTable, NdarrayMixin, Column
from .... import units as u
from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
from ....time import Time, TimeDelta
from ....units.quantity import QuantityInfo

try:
    import h5py
except ImportError:
    HAS_H5PY = False
else:
    HAS_H5PY = True

try:
    import yaml
except ImportError:
    HAS_YAML = False
else:
    HAS_YAML = True

ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
              np.int16, np.int32, np.int64, np.float32, np.float64,
              np.bool_, '|S3']


def _default_values(dtype):
    if dtype == np.bool_:
        return [0, 1, 1]
    elif dtype == '|S3':
        return [b'abc', b'def', b'ghi']
    else:
        return [1, 2, 3]


@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        t1.write(test_file)
    assert exc.value.args[0] == "table path should be set via the path= argument"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_notable_nopath(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    with pytest.raises(ValueError) as exc:
        t1 = Table.read(test_file, path='/', format='hdf5')
    assert exc.value.args[0] == 'no table found in HDF5 group /'


@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    t2 = Table.read(test_file)
    assert np.all(t1['a'] == t2['a'])


@pytest.mark.skipif('not HAS_H5PY')
def test_write_invalid_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        t1.write(test_file, path='test/')
    assert exc.value.args[0] == "table path should end with table name, not /"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_invalid_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    with pytest.raises(OSError) as exc:
        Table.read(test_file, path='test/')
    assert exc.value.args[0] == "Path test/ does not exist"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    with pytest.raises(OSError) as exc:
        Table.read(test_file, path='test/path/table')
    assert exc.value.args[0] == "Path test/path/table does not exist"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_table(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    with h5py.File(test_file, 'w') as f:
        f.create_group('test').create_group('path')
    with pytest.raises(OSError) as exc:
        Table.read(test_file, path='test/path/table')
    assert exc.value.args[0] == "Path test/path/table does not exist"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group_fileobj(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    with h5py.File(test_file, 'w') as f:
        with pytest.raises(OSError) as exc:
            Table.read(f, path='test/path/table')
        assert exc.value.args[0] == "Path test/path/table does not exist"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_simple(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    t2 = Table.read(test_file, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_table(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    with pytest.raises(OSError) as exc:
        t1.write(test_file, path='the_table', append=True)
    assert exc.value.args[0] == "Table the_table already exists"


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_memory(tmpdir):
    with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='the_table')
        t2 = Table.read(output_file, path='the_table')
        assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(OSError) as exc:
        t1.write(test_file, path='the_table')
    assert exc.value.args[0].startswith("File exists:")


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_overwrite(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table', overwrite=True)
    t2 = Table.read(test_file, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table_1', append=True)
    t1.write(test_file, path='the_table_2', append=True)
    t2 = Table.read(test_file, path='the_table_1')
    assert np.all(t2['a'] == [1, 2, 3])
    t3 = Table.read(test_file, path='the_table_2')
    assert np.all(t3['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_groups(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    with h5py.File(test_file, 'w') as f:
        f.create_group('test_1')
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='test_1/the_table_1', append=True)
    t1.write(test_file, path='test_2/the_table_2', append=True)
    t2 = Table.read(test_file, path='test_1/the_table_1')
    assert np.all(t2['a'] == [1, 2, 3])
    t3 = Table.read(test_file, path='test_2/the_table_2')
    assert np.all(t3['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_overwrite(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='table1')
    t1.write(test_file, path='table2', append=True)
    t1v2 = Table()
    t1v2.add_column(Column(name='a', data=[4, 5, 6]))
    with pytest.raises(OSError) as exc:
        t1v2.write(test_file, path='table1', append=True)
    assert exc.value.args[0] == 'Table table1 already exists'
    t1v2.write(test_file, path='table1', append=True, overwrite=True)
    t2 = Table.read(test_file, path='table1')
    assert np.all(t2['a'] == [4, 5, 6])
    t3 = Table.read(test_file, path='table2')
    assert np.all(t3['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')

    import h5py
    with h5py.File(test_file, 'r') as input_file:
        t2 = Table.read(input_file, path='the_table')
        assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='path/to/data/the_table')

    import h5py
    with h5py.File(test_file, 'r') as input_file:
        t2 = Table.read(input_file, path='path/to/data/the_table')
        assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_group_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='path/to/data/the_table')

    import h5py
    with h5py.File(test_file, 'r') as input_file:
        t2 = Table.read(input_file['path/to'], path='data/the_table')
        assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_read_wrong_fileobj():

    class FakeFile:
        def read(self):
            pass

    f = FakeFile()

    with pytest.raises(TypeError) as exc:
        t1 = Table.read(f, format='hdf5')
    assert exc.value.args[0] == 'h5py can only open regular files'


@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='the_table')

    t2 = Table.read(test_file, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_write_filobj_group(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='path/to/data/the_table')

    t2 = Table.read(test_file, path='path/to/data/the_table')
    assert np.all(t2['a'] == [1, 2, 3])


@pytest.mark.skipif('not HAS_H5PY')
def test_write_wrong_type():
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(TypeError) as exc:
        t1.write(1212, path='path/to/data/the_table', format='hdf5')
    assert exc.value.args[0] == ('output should be a string '
                                 'or an h5py File or Group object')


@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
    test_file = str(tmpdir.join('test.hdf5'))

    values = _default_values(dtype)

    t1 = Table()
    t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
    t1.write(test_file, path='the_table')

    t2 = Table.read(test_file, path='the_table')

    assert np.all(t2['a'] == values)
    assert t2['a'].dtype == dtype


@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_all_dtypes(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()

    for dtype in ALL_DTYPES:
        values = _default_values(dtype)
        t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))

    t1.write(test_file, path='the_table')

    t2 = Table.read(test_file, path='the_table')

    for dtype in ALL_DTYPES:
        values = _default_values(dtype)
        assert np.all(t2[str(dtype)] == values)
        assert t2[str(dtype)].dtype == dtype


@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_meta(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))

    t1.meta['a'] = 1
    t1.meta['b'] = 'hello'
    t1.meta['c'] = 3.14159
    t1.meta['d'] = True
    t1.meta['e'] = np.array([1, 2, 3])

    t1.write(test_file, path='the_table')

    t2 = Table.read(test_file, path='the_table')

    for key in t1.meta:
        assert np.all(t1.meta[key] == t2.meta[key])


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_preserve_serialized(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_preserve_serialized_compatibility_mode(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    with catch_warnings() as w:
        t1.write(test_file, path='the_table', serialize_meta=True,
                 overwrite=True, compatibility_mode=True)

    assert str(w[0].message).startswith(
        "compatibility mode for writing is deprecated")

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_preserve_serialized_in_complicated_path(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}

    t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,
             overwrite=True)

    t2 = Table.read(test_file, path='the_table/complicated/path')

    assert t1['a'].format == t2['a'].format
    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_metadata_very_large(tmpdir):
    """Test that very large datasets work, now!"""
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
    t1.meta["meta_biggerstill"] = "0" * (2 ** 18)

    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')

    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_metadata_very_large_fails_compatibility_mode(tmpdir):
    """Test that very large metadata do not work in compatibility mode."""
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3])
    t1.meta["meta"] = "0" * (2 ** 16 + 1)

    with catch_warnings() as w:
        t1.write(test_file, path='the_table', serialize_meta=True,
                 overwrite=True, compatibility_mode=True)
    assert len(w) == 2

    # Error message slightly changed in h5py 2.7.1, thus the 2-part assert
    assert str(w[1].message).startswith(
        "Attributes could not be written to the output HDF5 "
        "file: Unable to create attribute ")
    assert "bject header message is too large" in str(w[1].message)


@pytest.mark.skipif('not HAS_H5PY')
def test_skip_meta(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))

    t1.meta['a'] = 1
    t1.meta['b'] = 'hello'
    t1.meta['c'] = 3.14159
    t1.meta['d'] = True
    t1.meta['e'] = np.array([1, 2, 3])
    t1.meta['f'] = str

    with catch_warnings() as w:
        t1.write(test_file, path='the_table')
    assert len(w) == 1
    assert str(w[0].message).startswith(
        "Attribute `f` of type {0} cannot be written to HDF5 files - skipping".format(type(t1.meta['f'])))


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_fail_meta_serialize(tmpdir):
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.meta['f'] = str

    with pytest.raises(Exception) as err:
        t1.write(test_file, path='the_table', serialize_meta=True)
    assert "cannot represent an object: <class 'str'>" in str(err)


@pytest.mark.skipif('not HAS_H5PY')
def test_read_h5py_objects(tmpdir):

    # Regression test - ensure that Datasets are recognized automatically

    test_file = str(tmpdir.join('test.hdf5'))

    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='the_table')

    f = h5py.File(test_file)

    t2 = Table.read(f, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])

    t3 = Table.read(f['/'], path='the_table')
    assert np.all(t3['a'] == [1, 2, 3])

    t4 = Table.read(f['the_table'])
    assert np.all(t4['a'] == [1, 2, 3])

    f.close()  # don't raise an error in 'test --open-files'


def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
    if compare_class:
        assert obj1.__class__ is obj2.__class__

    info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description',
                  'info.meta']
    for attr in attrs + info_attrs:
        a1 = obj1
        a2 = obj2
        for subattr in attr.split('.'):
            try:
                a1 = getattr(a1, subattr)
                a2 = getattr(a2, subattr)
            except AttributeError:
                a1 = a1[subattr]
                a2 = a2[subattr]

        # Mixin info.meta can be None instead of empty OrderedDict(); #6720
        # would fix this.
        if attr == 'info.meta':
            if a1 is None:
                a1 = {}
            if a2 is None:
                a2 = {}

        assert np.all(a1 == a2)


# Testing HDF5 table read/write with mixins.  This is mostly
# copied from FITS mixin testing.

el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
              obstime='J1990.5')
scc = sc.copy()
scc.representation = 'cartesian'
tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)


mixin_cols = {
    'tm': tm,
    'dt': TimeDelta([1, 2] * u.day),
    'sc': sc,
    'scc': scc,
    'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
                    obstime=['J1990.5', 'J1991.5']),
    'q': [1, 2] * u.m,
    'lat': Latitude([1, 2] * u.deg),
    'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
    'ang': Angle([1, 2] * u.deg),
    'el2': el2,
}

time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {
    'c1': ['data'],
    'c2': ['data'],
    'tm': time_attrs,
    'dt': ['shape', 'value', 'format', 'scale'],
    'sc': ['ra', 'dec', 'representation', 'frame.name'],
    'scc': ['x', 'y', 'z', 'representation', 'frame.name'],
    'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'],
    'q': ['value', 'unit'],
    'lon': ['value', 'unit', 'wrap_angle'],
    'lat': ['value', 'unit'],
    'ang': ['value', 'unit'],
    'el2': ['x', 'y', 'z', 'ellipsoid'],
    'nd': ['x', 'y', 'z'],
}


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_hdf5_mixins_qtable_to_table(tmpdir):
    """Test writing as QTable and reading as Table.  Ensure correct classes
    come out.
    """
    filename = str(tmpdir.join('test_simple.hdf5'))

    names = sorted(mixin_cols)

    t = QTable([mixin_cols[name] for name in names], names=names)
    t.write(filename, format='hdf5', path='root', serialize_meta=True)
    t2 = Table.read(filename, format='hdf5', path='root')

    assert t.colnames == t2.colnames

    for name, col in t.columns.items():
        col2 = t2[name]

        # Special-case Time, which does not yet support round-tripping
        # the format.
        if isinstance(col2, Time):
            col2.format = col.format

        attrs = compare_attrs[name]
        compare_class = True

        if isinstance(col.info, QuantityInfo):
            # Downgrade Quantity to Column + unit
            assert type(col2) is Column
            # Class-specific attributes like `value` or `wrap_angle` are lost.
            attrs = ['unit']
            compare_class = False
            # Compare data values here (assert_objects_equal doesn't know how
            # in this case)
            assert np.all(col.value == col2)

        assert_objects_equal(col, col2, attrs, compare_class)


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmpdir):
    """Test write/read all cols at once and validate intermediate column names"""
    filename = str(tmpdir.join('test_simple.hdf5'))
    names = sorted(mixin_cols)

    serialized_names = ['ang',
                        'dt.jd1', 'dt.jd2',
                        'el2.x', 'el2.y', 'el2.z',
                        'lat',
                        'lon',
                        'q',
                        'sc.ra', 'sc.dec',
                        'scc.x', 'scc.y', 'scc.z',
                        'scd.ra', 'scd.dec', 'scd.distance',
                        'scd.obstime.jd1', 'scd.obstime.jd2',
                        'tm.jd1', 'tm.jd2',
                        ]

    t = table_cls([mixin_cols[name] for name in names], names=names)
    t.meta['C'] = 'spam'
    t.meta['comments'] = ['this', 'is', 'a', 'comment']
    t.meta['history'] = ['first', 'second', 'third']

    t.write(filename, format="hdf5", path='root', serialize_meta=True)

    t2 = table_cls.read(filename, format='hdf5', path='root')
    assert t2.meta['C'] == 'spam'
    assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
    assert t2.meta['history'] == ['first', 'second', 'third']

    assert t.colnames == t2.colnames

    # Read directly via hdf5 and confirm column names
    h5 = h5py.File(filename, 'r')
    assert list(h5['root'].dtype.names) == serialized_names
    h5.close()


@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):
    """Test write/read one col at a time and do detailed validation"""
    filename = str(tmpdir.join('test_simple.hdf5'))
    name, col = name_col

    c = [1.0, 2.0]
    t = table_cls([c, col, c], names=['c1', name, 'c2'])
    t[name].info.description = 'my description'
    t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}

    if not t.has_mixin_columns:
        pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
    if isinstance(t[name], NdarrayMixin):
        pytest.xfail('NdarrayMixin not supported')

    t.write(filename, format="hdf5", path='root', serialize_meta=True)
    t2 = table_cls.read(filename, format='hdf5', path='root')

    assert t.colnames == t2.colnames

    for colname in t.colnames:
        assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])

    # Special case to make sure Column type doesn't leak into Time class data
    if name.startswith('tm'):
        assert t2[name]._time.jd1.__class__ is np.ndarray
        assert t2[name]._time.jd2.__class__ is np.ndarray


@pytest.mark.skipif('HAS_YAML or not HAS_H5PY')
def test_warn_for_dropped_info_attributes(tmpdir):
    filename = str(tmpdir.join('test.hdf5'))
    t = Table([[1, 2]])
    t['col0'].info.description = 'hello'
    with catch_warnings() as warns:
        t.write(filename, path='root', serialize_meta=False)
    assert len(warns) == 1
    assert str(warns[0].message).startswith(
        "table contains column(s) with defined 'unit'")


@pytest.mark.skipif('HAS_YAML or not HAS_H5PY')
def test_error_for_mixins_but_no_yaml(tmpdir):
    filename = str(tmpdir.join('test.hdf5'))
    t = Table([mixin_cols['sc']])
    with pytest.raises(TypeError) as err:
        t.write(filename, path='root', serialize_meta=True)
    assert "cannot write type SkyCoord column 'col0' to HDF5 without PyYAML" in str(err)
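# Minimal usage sketch (illustration only, not one of the tests above),
# assuming h5py and pyyaml are installed: the write/read pattern these tests
# exercise, shown outside of pytest.  The filename and HDF5 path are
# hypothetical placeholders.
if __name__ == '__main__':  # pragma: no cover
    from astropy.table import Table
    import astropy.units as u

    t = Table()
    t['v'] = [1.0, 2.0, 3.0] * u.km / u.s
    t['v'].description = 'line-of-sight velocity'
    t.write('example.hdf5', path='data/velocities',
            serialize_meta=True, overwrite=True)

    t2 = Table.read('example.hdf5', path='data/velocities')
    assert t2['v'].unit == u.km / u.s
    assert t2['v'].description == 'line-of-sight velocity'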
c5590daa0dbae6e598534376e2001eee076428a188ddc9eabdd0c430fbae9526
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pytest

from .. import fnpickle, fnunpickle
from ....tests.helper import catch_warnings
from ....utils.exceptions import AstropyDeprecationWarning


def test_fnpickling_simple(tmpdir):
    """
    Tests the `fnpickle` and `fnunpickle` functions' basic operation by
    pickling and unpickling a string, using both a filename and a file.
    """
    fn = str(tmpdir.join('test1.pickle'))

    obj1 = 'astring'
    fnpickle(obj1, fn)
    res = fnunpickle(fn, 0)
    assert obj1 == res

    # now try with a file-like object instead of a string
    with open(fn, 'wb') as f:
        fnpickle(obj1, f)
    with open(fn, 'rb') as f:
        res = fnunpickle(f)
        assert obj1 == res

    with catch_warnings(AstropyDeprecationWarning):
        fnunpickle(fn, 0, True)


class ToBePickled:
    def __init__(self, item):
        self.item = item

    def __eq__(self, other):
        if isinstance(other, ToBePickled):
            return self.item == other.item
        else:
            return False


def test_fnpickling_class(tmpdir):
    """
    Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
    and unpickle custom classes.
    """
    fn = str(tmpdir.join('test2.pickle'))

    obj1 = 'astring'
    obj2 = ToBePickled(obj1)
    fnpickle(obj2, fn)
    res = fnunpickle(fn)
    assert res == obj2


def test_fnpickling_protocol(tmpdir):
    """
    Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
    and unpickle pickle files from all protocols.
    """
    import pickle

    obj1 = 'astring'
    obj2 = ToBePickled(obj1)

    for p in range(pickle.HIGHEST_PROTOCOL + 1):
        fn = str(tmpdir.join('testp{}.pickle'.format(p)))
        fnpickle(obj2, fn, protocol=p)
        res = fnunpickle(fn)
        assert res == obj2


def test_fnpickling_many(tmpdir):
    """
    Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
    and unpickle multiple objects from a single file.
    """
    fn = str(tmpdir.join('test3.pickle'))

    # now try multiples
    obj3 = 328.3432
    obj4 = 'blahblahfoo'
    fnpickle(obj3, fn)
    fnpickle(obj4, fn, append=True)

    res = fnunpickle(fn, number=-1)
    assert len(res) == 2
    assert res[0] == obj3
    assert res[1] == obj4

    fnpickle(obj4, fn, append=True)
    res = fnunpickle(fn, number=2)
    assert len(res) == 2

    with pytest.raises(EOFError):
        fnunpickle(fn, number=5)
c999981ea65284641b7af7a3197f91d97659a006ded67971f520150b9fe7e938
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-

import numpy as np

from asdf import yamlutil
from asdf.tags.core.ndarray import NDArrayType

from astropy import table

from ...types import AstropyAsdfType


class TableType(AstropyAsdfType):
    name = 'core/table'
    types = ['astropy.table.Table']
    requires = ['astropy']

    @classmethod
    def from_tree(cls, node, ctx):
        columns = [
            yamlutil.tagged_tree_to_custom_tree(c, ctx)
            for c in node['columns']
        ]

        return table.Table(columns, meta=node.get('meta', {}))

    @classmethod
    def to_tree(cls, data, ctx):
        columns = []
        for name in data.colnames:
            column = yamlutil.custom_tree_to_tagged_tree(
                data.columns[name], ctx)
            columns.append(column)

        node = {'columns': columns}
        if data.meta:
            node['meta'] = data.meta

        return node

    @classmethod
    def assert_equal(cls, old, new):
        assert old.meta == new.meta
        NDArrayType.assert_equal(np.array(old), np.array(new))


class ColumnType(AstropyAsdfType):
    name = 'core/column'
    types = ['astropy.table.Column', 'astropy.table.MaskedColumn']
    requires = ['astropy']
    handle_dynamic_subclasses = True

    @classmethod
    def from_tree(cls, node, ctx):
        data = yamlutil.tagged_tree_to_custom_tree(
            node['data'], ctx)
        name = node['name']
        description = node.get('description')
        unit = node.get('unit')
        meta = node.get('meta', None)

        return table.Column(
            data=data._make_array(), name=name,
            description=description, unit=unit, meta=meta)

    @classmethod
    def to_tree(cls, data, ctx):
        node = {
            'data': yamlutil.custom_tree_to_tagged_tree(
                data.data, ctx),
            'name': data.name
        }
        if data.description:
            node['description'] = data.description
        if data.unit:
            node['unit'] = yamlutil.custom_tree_to_tagged_tree(
                data.unit, ctx)
        if data.meta:
            node['meta'] = data.meta

        return node

    @classmethod
    def assert_equal(cls, old, new):
        assert old.meta == new.meta
        assert old.description == new.description
        assert old.unit == new.unit
        NDArrayType.assert_equal(np.array(old), np.array(new))
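# Minimal round-trip sketch (illustration only, not part of the original
# module), assuming the asdf package is installed: a Table placed in an ASDF
# tree is serialized through the TableType/ColumnType tags above.  The output
# filename is a hypothetical placeholder.
if __name__ == '__main__':  # pragma: no cover
    import asdf
    from astropy.table import Table

    t = Table([[1, 2, 3], ['a', 'b', 'c']], names=['idx', 'label'])
    af = asdf.AsdfFile({'table': t})
    af.write_to('table_example.asdf')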
2c0bd4d5d71b34162225974cb40dda7404b1faedfcc1b7866f0099fd450f0b67
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-


def get_package_data():  # pragma: no cover
    return {
        str('astropy.io.misc.asdf.tags.fits.tests'): ['data/*.fits']
    }