# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from . import parse, from_table
from .tree import VOTableFile, Table as VOTable
from astropy.io import registry as io_registry
from astropy.table import Table
from astropy.table.column import BaseColumn
from astropy.units import Quantity
def is_votable(origin, filepath, fileobj, *args, **kwargs):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
origin : str
Either 'read' or 'write' (supplied by the I/O registry).
filepath : str, optional
The path to a file containing a VOTABLE_ xml document.
fileobj : readable file-like object, optional
A file object containing a VOTABLE_ xml document.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
from . import is_votable
if origin == 'read':
if fileobj is not None:
try:
result = is_votable(fileobj)
finally:
fileobj.seek(0)
return result
elif filepath is not None:
return is_votable(filepath)
elif isinstance(args[0], (VOTableFile, VOTable)):
return True
else:
return False
else:
return False
def read_table_votable(input, table_id=None, use_names_over_ids=False, verify=None):
"""
Read a Table object from a VOTable file
Parameters
----------
input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.Table`
If a string, the filename to read the table from. If a
:class:`~astropy.io.votable.tree.VOTableFile` or
:class:`~astropy.io.votable.tree.Table` object, the object to extract
the table from.
table_id : str or int, optional
The table to read in. If a `str`, it is an ID corresponding
to the ID of the table in the file (not all VOTable files
assign IDs to their tables). If an `int`, it is the index of
the table in the file, starting at 0.
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the names
of columns in the `~astropy.table.Table` instance. Since names
are not guaranteed to be unique, this may cause some columns
to be renamed by appending numbers to the end. Otherwise
(default), use the ID attributes as the column names.
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to ``'ignore'``.
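Examples
--------
A minimal sketch (the file and table names are hypothetical); this
function is normally reached through the unified I/O interface
registered at the bottom of this module::
    >>> from astropy.table import Table
    >>> t = Table.read('catalog.vot', format='votable', table_id='results')  # doctest: +SKIP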
"""
if not isinstance(input, (VOTableFile, VOTable)):
input = parse(input, table_id=table_id, verify=verify)
# Parse all table objects
table_id_mapping = dict()
tables = []
if isinstance(input, VOTableFile):
for table in input.iter_tables():
if table.ID is not None:
table_id_mapping[table.ID] = table
tables.append(table)
if len(tables) > 1:
if table_id is None:
raise ValueError(
"Multiple tables found: table id should be set via "
"the table_id= argument. The available tables are {0}, "
'or integers less than {1}.'.format(
', '.join(table_id_mapping.keys()), len(tables)))
elif isinstance(table_id, str):
if table_id in table_id_mapping:
table = table_id_mapping[table_id]
else:
raise ValueError(
"No tables with id={0} found".format(table_id))
elif isinstance(table_id, int):
if table_id < len(tables):
table = tables[table_id]
else:
raise IndexError(
"Table index {0} is out of range. "
"{1} tables found".format(
table_id, len(tables)))
elif len(tables) == 1:
table = tables[0]
else:
raise ValueError("No table found")
elif isinstance(input, VOTable):
table = input
# Convert to an astropy.table.Table object
return table.to_table(use_names_over_ids=use_names_over_ids)
def write_table_votable(input, output, table_id=None, overwrite=False,
tabledata_format=None):
"""
Write a Table object to a VOTable file
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
table_id : str, optional
The table ID to use. If this is not specified, the 'ID' keyword in the
``meta`` object of the table will be used.
overwrite : bool, optional
Whether to overwrite any existing file without warning.
tabledata_format : str, optional
The format of table data to write. Must be one of ``tabledata``
(text representation), ``binary`` or ``binary2``. Default is
``tabledata``. See :ref:`votable-serialization`.
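Examples
--------
A minimal sketch (``t`` is an existing `~astropy.table.Table`; the file
name is hypothetical); normally reached through the unified I/O
interface registered at the bottom of this module::
    >>> t.write('out.vot', format='votable', tabledata_format='binary')  # doctest: +SKIP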
"""
# Only those columns which are instances of BaseColumn or Quantity can be written
unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError('cannot write table with mixin column(s) {0} to VOTable'
.format(unsupported_names))
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
# Create a new VOTable file
table_file = from_table(input, table_id=table_id)
# Write out file
table_file.to_xml(output, tabledata_format=tabledata_format)
io_registry.register_reader('votable', Table, read_table_votable)
io_registry.register_writer('votable', Table, write_table_votable)
io_registry.register_identifier('votable', Table, is_votable)
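# With the reader, writer and identifier registered above, VOTable I/O is
# available through the unified Table interface. Because ``is_votable``
# inspects the file contents, the format can often be auto-detected on read.
# A sketch (file names are hypothetical):
#
#     from astropy.table import Table
#     t = Table.read('catalog.vot')          # format inferred by is_votable
#     t.write('copy.vot', format='votable')  # writer selected explicitly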
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import io
import re
import sys
import gzip
import base64
import codecs
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from astropy.io import fits
from astropy import __version__ as astropy_version
from astropy.utils.collections import HomogeneousList
from astropy.utils.xml.writer import XMLWriter
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import InheritDocstrings
from . import converters
from .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,
warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,
W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,
W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,
W44, W45, W50, W52, W53, E06, E08, E09, E10, E11, E12,
E13, E15, E16, E17, E18, E19, E20, E21)
from . import ucd as ucd_mod
from . import util
from . import xmlutil
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys',
'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',
'VOTableFile'
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
Masked arrays cannot be resized in place, and `np.resize` and
`ma.resize` are both incompatible with structured arrays.
Therefore, we allocate a new masked array of the requested size and
copy the original data into it.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
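# A sketch of the behaviour: given a masked structured array ``arr`` of
# length 3, ``_resize(arr, 5)`` returns a new length-5 masked array whose
# first three rows are copied from ``arr``; the extra rows are zero-filled
# with an all-False mask.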
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, returns a list of
values.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
"{} references itself".format(element_name),
element._config, element._pos, KeyError)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name))
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
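# A sketch of how the factory is intended to be bound (the method name and
# docstring are illustrative):
#
#     get_field_by_id = _lookup_by_attr_factory(
#         'ID', True, 'iter_fields_and_params', 'FIELD',
#         """Looks up a FIELD element by the given ID.""")
#
# The resulting method only considers elements that appear *before* the
# referencing element, matching the VOTABLE rule against forward references.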
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
"{} references itself".format(element_name),
element._config, element._pos, KeyError)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name))
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# In the future, this should take into account the VOTable
# version.
return 'cds'
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get('unit_format') is None:
format = _get_default_unit_format(config)
else:
format = config['unit_format']
return format
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if (year is not None and
re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None):
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
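# For example, "J2000", "B1950" and "2015.5" all match the pattern above,
# while a value such as "2000AD" fails the check and triggers W07.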
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string or Unicode string.
Parameters
----------
string : str
The string to check.
attr_name : str
The name of the attribute the string was found in (used for error
messages)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
"""
if config is None:
config = {}
if config.get('version_1_1_or_later'):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get(
'version_1_2_or_later', False),
has_colon=config.get('version_1_2_or_later', False))
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get('verify', 'ignore') == 'exception':
vo_raise(W06, (ucd, str(e)), config, pos)
elif config.get('verify', 'ignore') == 'warn':
vo_warn(W06, (ucd, str(e)), config, pos)
return False
else:
return False
return True
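# For example (a sketch), a UCD1+ word list such as "phot.mag;em.opt.V"
# parses cleanly, while an unparseable string leads to a W06 warning or
# exception, depending on the 'verify' setting.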
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(
W28, W28, ('xtype', self._element_name, '1.2'),
self._config, self._pos)
check_string(xtype, 'xtype', self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (self._utype_in_v1_2 and
utype is not None and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('utype', self._element_name, '1.2'),
self._config, self._pos)
check_string(utype, 'utype', self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == '':
ucd = None
if ucd is not None:
if (self._ucd_in_v1_2 and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('ucd', self._element_name, '1.2'),
self._config, self._pos)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element(metaclass=InheritDocstrings):
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ''
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get('version_1_1_or_later'):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterator
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : Element
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name,
attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, self._content,
attrib=w.object_attrs(self, self._attr_list))
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',
'href', 'action']
_element_name = 'LINK'
def __init__(self, ID=None, title=None, value=None, href=None, action=None,
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get('content-role') or kwargs.get('content_role')
content_type = kwargs.get('content-type') or kwargs.get('content_type')
if 'gref' in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
'LINK', kwargs.keys(), config, pos,
['content-role', 'content_role', 'content-type', 'content_type',
'gref'])
@property
def content_role(self):
"""
Defines the MIME role of the referenced object. Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if ((content_role == 'type' and
not self._config['version_1_3_or_later']) or
content_role not in
(None, 'query', 'hints', 'doc', 'location')):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault('links', [])
column.meta['links'].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,
_UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = 'INFO'
_attr_list_11 = ['ID', 'name', 'value']
_attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']
_utype_in_v1_2 = True
def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,
ref=None, unit=None, ucd=None, utype=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs('INFO', ['xtype'], config, pos)
if ref is not None:
warn_unknown_attrs('INFO', ['ref'], config, pos)
if unit is not None:
warn_unknown_attrs('INFO', ['unit'], config, pos)
if ucd is not None:
warn_unknown_attrs('INFO', ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs('INFO', ['utype'], config, pos)
warn_unknown_attrs('INFO', extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
warn_or_raise(W35, W35, ('name'), self._config, self._pos)
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
warn_or_raise(W35, W35, ('value'), self._config, self._pos)
check_string(value, 'value', self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),
self._config, self._pos)
xmlutil.check_id(ref, 'ref', self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
if not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),
self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
w.element(self._element_name, self._content,
attrib=attrib)
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, field, ID=None, null=None, ref=None,
type="legal", id=None, config=None, pos=None, **extras):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs('VALUES', extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
'0', self._config, self._pos)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""
[*required*] Defines the applicability of the domain defined
by this VALUES_ element. Must be one of the following
strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('legal', 'actual'):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,
self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, 'converter') and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == 'yes':
self._min_inclusive = True
elif inclusive == 'no':
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, 'converter') and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == 'yes':
self._max_inclusive = True
elif inclusive == 'no':
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != 'VALUES':
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == 'MIN':
if 'value' not in data:
vo_raise(E09, 'MIN', config, pos)
self.min = data['value']
self.min_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MIN', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'MAX':
if 'value' not in data:
vo_raise(E09, 'MAX', config, pos)
self.max = data['value']
self.max_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MAX', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'OPTION':
if 'value' not in data:
vo_raise(E09, 'OPTION', config, pos)
xmlutil.check_token(
data.get('name'), 'name', config, pos)
self.options.append(
(data.get('name'), data.get('value')))
warn_unknown_attrs(
'OPTION', data.keys(), config, pos,
['value', 'name'])
elif tag == 'VALUES':
break
return self
def is_defaults(self):
"""
Are the settings on this ``VALUES`` element all the same as the
XML defaults?
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (self.ref is None and self.null is None and self.ID is None and
self.max is None and self.min is None and self.options == [])
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return 'yes'
return 'no'
if self.is_defaults():
return
if self.ref is not None:
w.element('VALUES', attrib=w.object_attrs(self, ['ref']))
else:
with w.tag('VALUES',
attrib=w.object_attrs(
self, ['ID', 'null', 'ref'])):
if self.min is not None:
w.element(
'MIN',
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive))
if self.max is not None:
w.element(
'MAX',
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive))
for name, value in self.options:
w.element(
'OPTION',
name=name,
value=value)
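# A sketch of the XML produced for a fully-specified VALUES element
# (attribute values are illustrative; the MIN/MAX strings come from the
# field's converter output):
#
#     <VALUES ID="range1" null="-999">
#       <MIN value="0" inclusive="yes"/>
#       <MAX value="100" inclusive="no"/>
#       <OPTION name="a" value="1"/>
#     </VALUES>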
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ['ID', 'null']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta['min'] = {
'value': self.min,
'inclusive': self.min_inclusive}
if self.max is not None:
meta['max'] = {
'value': self.max,
'inclusive': self.max_inclusive}
if len(self.options):
meta['options'] = dict(self.options)
column.meta['values'] = meta
def from_table_column(self, column):
if column.info.meta is None or 'values' not in column.info.meta:
return
meta = column.info.meta['values']
for key in ['ID', 'null']:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if 'min' in meta:
self.min = meta['min']['value']
self.min_inclusive = meta['min']['inclusive']
if 'max' in meta:
self.max = meta['max']['value']
self.max_inclusive = meta['max']['inclusive']
if 'options' in meta:
self._options = list(meta['options'].items())
class Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,
_UtypeProperty, _UcdProperty):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',
'unit', 'width', 'precision', 'utype', 'ref']
_attr_list_12 = _attr_list_11 + ['xtype']
_element_name = 'FIELD'
def __init__(self, votable, ID=None, name=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, ref=None, type=None, id=None,
xtype=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ['xtype'], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from archive.noao.edu. It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (config.get('verify', 'ignore') != 'exception' and name == 'cprojection' and
ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and
datatype == 'double'):
datatype = 'char'
arraysize = '3'
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
if name is None:
if (self._element_name == 'PARAM' and
not config.get('version_1_1_or_later')):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
'string': 'char',
'unicodeString': 'unicodeChar',
'int16': 'short',
'int32': 'int',
'int64': 'long',
'float32': 'float',
'float64': 'double',
# The following appear in some Vizier tables
'unsignedInt': 'long',
'unsignedShort': 'int'
}
datatype_mapping.update(config.get('datatype_mapping', {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),
config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + "_{:d}".format(i)
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + " {:d}".format(i)
i += 1
if (not implicit and
new_name != field.name):
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
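# A sketch of the renaming: two fields that both have ID "flux" end up as
# "flux" and "flux_2" (with a W32 warning for the renamed one); duplicate
# ``name`` attributes are similarly made unique with " 2", " 3", ... suffixes
# and a W33 warning.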
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""
[*required*] The datatype of the column. Valid values (as
defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get('version_1_1_or_later'):
warn_or_raise(E10, E10, self._element_name, self._config,
self._pos)
datatype = 'char'
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if (arraysize is not None and
not re.match(r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize)):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
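# Examples of values accepted by the pattern above: "3", "5x10", "*",
# "30*" and "10x20*" (only the last dimension may be variable).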
@arraysize.deleter
def arraysize(self):
self._arraysize = None
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == 'VALUES':
self.values.__init__(
self._votable, self, config=config, pos=pos, **data)
self.values.parse(iterator, config)
elif tag == 'LINK':
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == 'DESCRIPTION':
warn_unknown_attrs(
'DESCRIPTION', data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(
W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in
self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element('DESCRIPTION', self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (isinstance(self.converter, converters.FloatingPoint) and
self.converter.output_format != '{!r:>}'):
column.format = self.converter.output_format
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs['unit'] = column.info.unit
kwargs['name'] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and 'links' in meta:
for link in meta['links']:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
"""
_attr_list_11 = Field._attr_list_11 + ['value']
_attr_list_12 = Field._attr_list_12 + ['value']
_element_name = 'PARAM'
def __init__(self, votable, ID=None, name=None, value=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, type=None, id=None, config=None,
pos=None, **extra):
self._value = value
Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,
arraysize=arraysize, ucd=ucd, unit=unit,
precision=precision, utype=utype, type=type,
id=id, config=config, pos=pos, **extra)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(
value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'equinox', 'epoch', 'system']
_element_name = 'COOSYS'
def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
if config.get('version_1_2_or_later'):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs('COOSYS', extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get('version_1_1_or_later'):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@property
def system(self):
"""
Specifies the type of coordinate system. Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',
'galactic', 'supergalactic', 'xy', 'barycentric',
'geo_app'):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
A parameter required to fix the equatorial or ecliptic systems
(as e.g. "J2000" as the default "eq_FK5" or "B1950" as the
default "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, 'equinox', self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, 'epoch', self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,
**extra):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Look up the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(
"No field named '{}'".format(self.ref),
self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Look up the :class:`Param` instance that this :class:`ParamRef`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
vo_raise(
"No params named '{}'".format(self.ref),
self._config, self._pos, KeyError)
class Group(Element, _IDProperty, _NameProperty, _UtypeProperty,
_UcdProperty, _DescriptionProperty):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
This information is currently ignored by the vo package---that is
the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, table, ID=None, name=None, ref=None, ucd=None,
utype=None, id=None, config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList(
(FieldRef, ParamRef, Group, Param))
warn_unknown_attrs('GROUP', extra.keys(), config, pos)
def __repr__(self):
return '<GROUP>... {0} entries ...</GROUP>'.format(len(self._entries))
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
'FIELDref': self._add_fieldref,
'PARAMref': self._add_paramref,
'PARAM': self._add_param,
'GROUP': self._add_group,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'GROUP', config, pos)
self.description = data or None
elif tag == 'GROUP':
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
'GROUP',
attrib=w.object_attrs(
self, ['ID', 'name', 'ref', 'ucd', 'utype'])):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
for field in entry.iter_fields_and_params():
yield field
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
for group in entry.iter_groups():
yield group
class Table(Element, _IDProperty, _NameProperty, _UcdProperty,
_DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
If the Table contains no data (for example, its enclosing
:class:`Resource` has :attr:`~Resource.type` == 'meta'), *array*
will have zero length.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,
utype=None, nrows=None, id=None, config=None, pos=None,
**extra):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
xmlutil.check_id(ref, 'ref', config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = 'tabledata'
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs('TABLE', extra.keys(), config, pos)
def __repr__(self):
return repr(self.to_table())
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(
W43, W43, ('TABLE', self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""
[*required*] The serialization format of the table. Must be
one of:
'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_)
'fits' (FITS_).
Note that the 'fits' format, since it requires an external
file, can not be written out. Any file read in with 'fits'
format will be read out, by default, in 'tabledata' format.
See :ref:`votable-serialization`.
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == 'fits':
vo_raise("fits format can not be written out, only read.",
self._config, self._pos, NotImplementedError)
if format == 'binary2':
if not self._config['version_1_3_or_later']:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config, self._pos)
elif format not in ('tabledata', 'binary'):
vo_raise("Invalid format '{}'".format(format),
self._config, self._pos)
self._format = format
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
        Create a new array to hold the data based on the current set
        of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype='O')
mask = np.zeros((nrows,), dtype='b')
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
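            # Build a mask dtype parallel to the data dtype: every
            # column becomes a bool flag, except object ("O") columns
            # (variable-length fields), which stay "O" so each cell can
            # hold the mask of its nested array.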
descr_mask = []
for d in array.dtype.descr:
                new_type = 'O' if d[1][1] == 'O' else 'bool'
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get('columns')
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get('table_number')
current_table_number = config.get('_current_table_number')
skip_table = False
if current_table_number is not None:
config['_current_table_number'] += 1
if (table_number is not None and
table_number != current_table_number):
skip_table = True
self._empty = True
table_id = config.get('table_id')
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
else:
if tag == 'TABLE':
return self
elif tag == 'DESCRIPTION':
if self.description is not None:
                            warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
else:
tag_mapping = {
'FIELD': self._add_field,
'PARAM': self._add_param,
'GROUP': self._add_group,
'LINK': self._add_link,
'INFO': self._add_info,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
                            warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
elif tag == 'TABLE':
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
                if np.any(columns < 0) or np.any(columns >= len(fields)):
raise ValueError(
"Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(
"Columns '{}' not found in fields list".format(columns))
else:
raise TypeError("Invalid columns list")
if not skip_table:
for start, tag, data, pos in iterator:
if start:
if tag == 'TABLEDATA':
warn_unknown_attrs(
'TABLEDATA', data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config)
break
elif tag == 'BINARY':
warn_unknown_attrs(
'BINARY', data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos)
break
elif tag == 'BINARY2':
if not config['version_1_3_or_later']:
warn_or_raise(
W52, W52, config['version'], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos)
break
elif tag == 'FITS':
warn_unknown_attrs(
'FITS', data.keys(), config, pos, ['extnum'])
try:
extnum = int(data.get('extnum', 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(
iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == 'DATA':
break
for start, tag, data, pos in iterator:
if start and tag == 'INFO':
if not config.get('version_1_2_or_later'):
warn_or_raise(
W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == 'TABLE':
break
return self
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get('invalid', 'exception')
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == 'TR':
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = (data.get('encoding', None) == 'base64')
warn_unknown_attrs(
tag, data.keys(), config, pos, ['encoding'])
else:
if tag == 'TD':
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(
data.encode('ascii'))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](
buf.read)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
else:
try:
value, mask_value = parsers[i](
data, config, pos)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
except Exception as e:
if invalid == 'exception':
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == 'TR':
break
else:
self._add_unknown_tag(
iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows:numrows + chunk_size] = array_chunk
array.mask[numrows:numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == 'TABLEDATA':
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if (self.nrows is not None and
self.nrows >= 0 and
self.nrows != numrows):
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
if 'href' not in data:
have_local_stream = True
if data.get('encoding', None) != 'base64':
warn_or_raise(
W38, W38, data.get('encoding', None),
config, pos)
else:
href = data['href']
xmlutil.check_anyuri(href, config, pos)
encoding = data.get('encoding', None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode('ascii'))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, " +
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'rb', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
"Unknown encoding type '{}'".format(encoding),
self._config, self._pos, NotImplementedError)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
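                # BINARY2 (mode 2) prefixes each row with a bit array
                # flagging NULL columns; plain BINARY (mode 1) has no
                # such prefix, so null flags come only from each
                # column's binary parser.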
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(converters.bitarray_to_bool(
mask_bits, len(fields)))
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e, config, pos, "(in row {:d}, col '{}')".format(
numrows, fields[i].ID))
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
href = data['href']
encoding = data.get('encoding', None)
else:
break
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, "
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'r', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
"Unknown encoding type '{}'".format(encoding),
self._config, self._pos, NotImplementedError)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get('tabledata_format')
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == 'fits':
format = 'tabledata'
with w.tag(
'TABLE',
attrib=w.object_attrs(
self,
('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups,
self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs['version_1_2_or_later']:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID="_g{0}".format(index))
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag('DATA'):
if format == 'tabledata':
self._write_tabledata(w, **kwargs)
elif format == 'binary':
self._write_binary(1, w, **kwargs)
elif format == 'binary2':
self._write_binary(2, w, **kwargs)
if kwargs['version_1_2_or_later']:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag('TABLEDATA'):
w._flush()
if (_has_c_tabledata_writer and
not kwargs.get('_debug_python_based_parser')):
supports_empty_values = [
field.converter.supports_empty_values(kwargs)
for field in fields]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write, array.data, array.mask, fields,
supports_empty_values, indent, 1 << 8)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [(i, field.converter.output,
field.converter.supports_empty_values(kwargs))
for i, field in enumerate(fields)]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID))
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = 'BINARY'
else:
tag_name = 'BINARY2'
with w.tag(tag_name):
with w.tag('STREAM', encoding='base64'):
fields_basic = [(i, field.converter.binoutput)
for (i, field) in enumerate(fields)]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
assert type(chunk) == bytes
except Exception as e:
vo_reraise(
e, additional="(in row {:d}, col '{}')".format(
row, fields[i].ID))
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode('ascii'))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
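
        A minimal usage sketch (``"example.xml"`` is a placeholder for
        a local VOTable file)::

            from astropy.io.votable import parse_single_table

            votable = parse_single_table("example.xml")
            t = votable.to_table(use_names_over_ids=True)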
"""
from astropy.table import Table
meta = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
            for name in names:
                new_name = name
                i = 2
while new_name in unique_names:
new_name = '{0}{1}'.format(name, i)
i += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
"""
kwargs = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype']:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if 'description' in table.meta:
new_table.description = table.meta['description']
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table),
mask=np.asarray(table.mask))
return new_table
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
for param in self.params:
yield param
for field in self.fields:
yield field
for group in self.groups:
for field in group.iter_fields_and_params():
yield field
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID or name.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
for g in group.iter_groups():
yield g
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""")
def iter_info(self):
for info in self.infos:
yield info
class Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,
_DescriptionProperty):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, name=None, ID=None, utype=None, type='results',
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(
self._element_name,
attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""
[*required*] The type of the resource. Must be either:
- 'results': This resource contains actual result values
(default)
- 'meta': This resource contains only datatype descriptions
(FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('results', 'meta'):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""
A dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. (The specification explicitly allows
for extra attributes here, but nowhere else.)
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
A list of groups
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
'TABLE': self._add_table,
'INFO': self._add_info,
'PARAM': self._add_param,
'GROUP' : self._add_group,
'COOSYS': self._add_coosys,
'RESOURCE': self._add_resource,
'LINK': self._add_link,
'DESCRIPTION': self._ignore_add
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'RESOURCE', config, pos)
self.description = data or None
elif tag == 'RESOURCE':
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ('ID', 'type', 'utype'))
attrs.update(self.extra_attributes)
with w.tag('RESOURCE', attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.coordinate_systems, self.params,
self.infos, self.links, self.tables,
self.resources):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
for table in self.tables:
yield table
for resource in self.resources:
for table in resource.iter_tables():
yield table
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
for param in self.params:
yield param
for table in self.tables:
for param in table.iter_fields_and_params():
yield param
for resource in self.resources:
for param in resource.iter_fields_and_params():
yield param
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
for coosys in self.coordinate_systems:
yield coosys
for resource in self.resources:
for coosys in resource.iter_coosys():
yield coosys
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
for info in self.infos:
yield info
for table in self.tables:
for info in table.iter_info():
yield info
for resource in self.resources:
for info in resource.iter_info():
yield info
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
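
    A sketch of building a file from scratch (the field names, types
    and output filename below are only illustrative)::

        from astropy.io.votable.tree import (
            VOTableFile, Resource, Table, Field)

        votable = VOTableFile()
        resource = Resource()
        votable.resources.append(resource)
        table = Table(votable)
        resource.tables.append(table)
        table.fields.extend([
            Field(votable, name="ra", datatype="double"),
            Field(votable, name="dec", datatype="double")])
        table.create_arrays(2)
        votable.to_xml("new_votable.xml")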
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.3"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version not in ("1.0", "1.1", "1.2", "1.3"):
raise ValueError("'version' should be one of '1.0', '1.1', "
"'1.2', or '1.3'")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return '<VOTABLE>... {0} tables ...</VOTABLE>'.format(n_tables)
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in ('1.1', '1.2', '1.3'):
raise ValueError(
"astropy.io.votable only supports VOTable versions "
"1.1, 1.2 and 1.3")
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
config['_current_table_number'] = 0
for start, tag, data, pos in iterator:
if start:
if tag == 'xml':
pass
elif tag == 'VOTABLE':
if 'version' not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config['version'] = self.version
else:
config['version'] = self._version = data['version']
if config['version'].lower().startswith('v'):
warn_or_raise(
W29, W29, config['version'], config, pos)
self._version = config['version'] = \
config['version'][1:]
if config['version'] not in ('1.1', '1.2', '1.3'):
vo_warn(W21, config['version'], config, pos)
if 'xmlns' in data:
correct_ns = ('http://www.ivoa.net/xml/VOTable/v{}'.format(
config['version']))
if data['xmlns'] != correct_ns:
vo_warn(
W41, (correct_ns, data['xmlns']), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config['version_1_1_or_later'] = \
util.version_compare(config['version'], '1.1') >= 0
config['version_1_2_or_later'] = \
util.version_compare(config['version'], '1.2') >= 0
config['version_1_3_or_later'] = \
util.version_compare(config['version'], '1.3') >= 0
tag_mapping = {
'PARAM': self._add_param,
'RESOURCE': self._add_resource,
'COOSYS': self._add_coosys,
'INFO': self._add_info,
'DEFINITIONS': self._add_definitions,
'DESCRIPTION': self._ignore_add,
'GROUP': self._add_group}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'VOTABLE', config, pos)
self.description = data or None
if not len(self.resources) and config['version_1_2_or_later']:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(self, fd, compressed=False, tabledata_format=None,
_debug_python_based_parser=False, _astropy_version=None):
"""
Write to an XML file.
Parameters
----------
fd : str path or writable file-like object
Where to write the file.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`votable-serialization`.
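
        A minimal sketch (``votable`` is assumed to be a `VOTableFile`
        instance)::

            votable.to_xml('output.xml.gz', compressed=True,
                           tabledata_format='binary')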
"""
if tabledata_format is not None:
if tabledata_format.lower() not in (
'tabledata', 'binary', 'binary2'):
raise ValueError("Unknown format type '{0}'".format(format))
kwargs = {
'version': self.version,
'version_1_1_or_later':
util.version_compare(self.version, '1.1') >= 0,
'version_1_2_or_later':
util.version_compare(self.version, '1.2') >= 0,
'version_1_3_or_later':
util.version_compare(self.version, '1.3') >= 0,
'tabledata_format':
tabledata_format,
'_debug_python_based_parser': _debug_python_based_parser,
'_group_number': 1}
with util.convert_to_writable_filelike(
fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
with w.tag('VOTABLE',
{'version': version,
'xmlns:xsi':
"http://www.w3.org/2001/XMLSchema-instance",
'xsi:noNamespaceSchemaLocation':
"http://www.ivoa.net/xml/VOTable/v{}".format(version),
'xmlns':
"http://www.ivoa.net/xml/VOTable/v{}".format(version)}):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [self.coordinate_systems, self.params,
self.infos, self.resources]
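                # From VOTable 1.2 on, GROUP elements are allowed as
                # direct children of VOTABLE, and are written here in
                # place of the COOSYS list.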
if kwargs['version_1_2_or_later']:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
for table in resource.iter_tables():
yield table
def get_first_table(self):
"""
Often, you know there is only one table in the file, and
that's all you need. This method returns that first table.
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""")
get_tables_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""")
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(
"No table at index {:d} found in VOTABLE file.".format(idx))
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
for field in resource.iter_fields_and_params():
yield field
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_ or name.
""")
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_values', 'VALUES',
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
for group in table.iter_groups():
yield group
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""")
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
for coosys in self.coordinate_systems:
yield coosys
for resource in self.resources:
for coosys in resource.iter_coosys():
yield coosys
get_coosys_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_coosys', 'COOSYS',
"""Looks up a COOSYS_ element by the given ID.""")
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
for info in self.infos:
yield info
for resource in self.resources:
for info in resource.iter_info():
yield info
get_info_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_info', 'INFO',
"""Looks up a INFO element by the given ID.""")
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
"""
for table in self.iter_tables():
table.format = format
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
table_id : str, optional
Set the given ID attribute on the returned Table instance.
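
        A minimal sketch using an in-memory `astropy.table.Table` (the
        id is only illustrative)::

            from astropy.table import Table as AstropyTable

            t = AstropyTable([[1.0, 2.0], [3, 4]], names=('a', 'b'))
            votable_file = VOTableFile.from_table(t, table_id='test_table')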
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read a
VOTable file.
"""
# STDLIB
import io
import os
import sys
import textwrap
import warnings
# LOCAL
from . import exceptions
from . import tree
from astropy.utils.xml import iterparser
from astropy.utils import data
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyDeprecationWarning
__all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate',
'reset_vo_warnings']
VERIFY_OPTIONS = ['ignore', 'warn', 'exception']
@deprecated_renamed_argument('pedantic', 'verify', pending=True, since='4.0')
def parse(source, columns=None, invalid='exception', verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None,
table_id=None, filename=None, unit_format=None,
datatype_mapping=None, _debug_python_based_parser=False):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : str or readable file-like object
Path or file object containing a VOTABLE_ xml file.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
        The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
        `~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.2 of
VOTable, and (probably) ``vounit`` in future versions of the
spec).
datatype_mapping : dict of str to str, optional
A mapping of datatype names to valid VOTable datatype names.
For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the
mapping ``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
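
    A minimal sketch (``"example.xml"`` is a placeholder for a local
    VOTable file)::

        from astropy.io.votable import parse

        votable = parse("example.xml")
        table = votable.get_first_table()
        data = table.array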
"""
from . import conf
invalid = invalid.lower()
if invalid not in ('exception', 'mask'):
raise ValueError("accepted values of ``invalid`` are: "
"``'exception'`` or ``'mask'``.")
if verify is None:
# NOTE: since the pedantic argument isn't fully deprecated yet, we need
# to catch the deprecation warning that occurs when accessing the
# configuration item, but only if it is for the pedantic option in the
# [io.votable] section.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
r"Config parameter \'pedantic\' in section \[io.votable\]",
AstropyDeprecationWarning)
conf_verify_lowercase = conf.verify.lower()
# We need to allow verify to be booleans as strings since the
# configuration framework doesn't make it easy/possible to have mixed
# types.
if conf_verify_lowercase in ['false', 'true']:
verify = conf_verify_lowercase == 'true'
else:
verify = conf_verify_lowercase
if isinstance(verify, bool):
verify = 'exception' if verify else 'warn'
elif verify not in VERIFY_OPTIONS:
raise ValueError('verify should be one of {0}'.format('/'.join(VERIFY_OPTIONS)))
if datatype_mapping is None:
datatype_mapping = {}
config = {
'columns': columns,
'invalid': invalid,
'verify': verify,
'chunk_size': chunk_size,
'table_number': table_number,
'filename': filename,
'unit_format': unit_format,
'datatype_mapping': datatype_mapping
}
if filename is None and isinstance(source, str):
config['filename'] = source
with iterparser.get_xml_iterator(
source,
_debug_python_based_parser=_debug_python_based_parser) as iterator:
return tree.VOTableFile(
config=config, pos=(1, 1)).parse(iterator, config)
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
"""
if kwargs.get('table_number') is None:
kwargs['table_number'] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like object
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`votable-serialization`.
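
    A minimal sketch (``votable`` is assumed to be a
    `~astropy.io.votable.tree.VOTableFile` instance)::

        from astropy.io.votable import writeto

        writeto(votable, "output.xml", tabledata_format="binary")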
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance")
table.to_xml(file, tabledata_format=tabledata_format,
_debug_python_based_parser=True)
def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : str or readable file-like object
        Path to a VOTABLE_ xml file or a `~pathlib.Path` object
        pointing to a VOTABLE_ xml file.
output : writable file-like object, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
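
    A minimal sketch (``"example.xml"`` is a placeholder filename)::

        from astropy.io.votable import validate

        ok = validate("example.xml")                   # report to stdout
        report = validate("example.xml", output=None)  # report as a str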
"""
from astropy.utils.console import print_code_line, color_print
    return_as_str = False
    if output is None:
        output = io.StringIO()
        return_as_str = True
lines = []
votable = None
reset_vo_warnings()
with data.get_readable_fileobj(source, encoding='binary') as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, 'name'):
filename = source.name
elif hasattr(source, 'url'):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify='warn', filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [str(x.message) for x in warning_lines if
issubclass(x.category, exceptions.VOWarning)] + lines
content_buffer.seek(0)
output.write("Validation report for {0}\n\n".format(filename))
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w['is_something']:
output.write(w['message'])
output.write('\n\n')
else:
line = xml_lines[w['nline'] - 1]
warning = w['warning']
if w['is_warning']:
color = 'yellow'
else:
color = 'red'
color_print(
'{0:d}: '.format(w['nline']), '',
warning or 'EXC', color,
': ', '',
textwrap.fill(
w['message'],
initial_indent=' ',
subsequent_indent=' ').lstrip(),
file=output)
print_code_line(line, w['nchar'], file=output)
output.write('\n')
else:
output.write('astropy.io.votable found no violations.\n\n')
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(
filename, version)
if success != 0:
output.write(
'xmllint schema violations:\n\n')
output.write(stderr.decode('utf-8'))
else:
output.write('xmllint passed\n')
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
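
    A minimal round-trip sketch (the column name and output filename
    are only illustrative)::

        from astropy.table import Table
        from astropy.io.votable import from_table, writeto

        t = Table([[1.0, 2.0, 3.0]], names=("x",))
        votable = from_table(t, table_id="example_table")
        writeto(votable, "example_out.xml")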
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : str or readable file-like object
Path or file object containing a VOTABLE_ xml file.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
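
    A minimal sketch (``"example.xml"`` is a placeholder filename)::

        from astropy.io.votable import is_votable

        if is_votable("example.xml"):
            print("looks like a VOTable file")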
"""
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != 'xml':
return False
break
for start, tag, d, pos in iterator:
if tag != 'VOTABLE':
return False
break
return True
except ValueError:
return False
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
if hasattr(module, '__warningregistry__'):
del module.__warningregistry__
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
.. _warnings:
Warnings
--------
.. note::
Most of the following warnings indicate violations of the VOTable
specification. They should be reported to the authors of the
tools that produced the VOTable file.
To control the warnings emitted, use the standard Python
:mod:`warnings` module. Most of these are of the type
`VOTableSpecWarning`.
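
For example, to turn every VOTable specification warning into an
exception (a minimal sketch)::

    import warnings
    from astropy.io.votable.exceptions import VOTableSpecWarning

    warnings.simplefilter("error", VOTableSpecWarning)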
{warnings}
.. _exceptions:
Exceptions
----------
.. note::
This is a list of many of the fatal exceptions emitted by ``astropy.io.votable``
when the file does not conform to spec. Other exceptions may be
raised due to unforeseen cases or bugs in ``astropy.io.votable`` itself.
{exceptions}
"""
# STDLIB
import io
import re
from textwrap import dedent
from warnings import warn
from astropy.utils.exceptions import AstropyWarning
__all__ = [
'warn_or_raise', 'vo_raise', 'vo_reraise', 'vo_warn',
'warn_unknown_attrs', 'parse_vowarning', 'VOWarning',
'VOTableChangeWarning', 'VOTableSpecWarning',
'UnimplementedWarning', 'IOWarning', 'VOTableSpecError']
MAX_WARNINGS = 10
def _format_message(message, name, config=None, pos=None):
if config is None:
config = {}
if pos is None:
pos = ('?', '?')
filename = config.get('filename', '?')
return '{}:{}:{}: {}: {}'.format(filename, pos[0], pos[1], name, message)
def _suppressed_warning(warning, config, stacklevel=2):
warning_class = type(warning)
config.setdefault('_warning_counts', dict()).setdefault(warning_class, 0)
config['_warning_counts'][warning_class] += 1
message_count = config['_warning_counts'][warning_class]
if message_count <= MAX_WARNINGS:
if message_count == MAX_WARNINGS:
warning.formatted_message += \
' (suppressing further warnings of this type...)'
warn(warning, stacklevel=stacklevel+1)
def warn_or_raise(warning_class, exception_class=None, args=(), config=None,
pos=None, stacklevel=1):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get('verify', 'warn')
if config_value == 'exception':
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == 'warn':
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel+1)
def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos)
def vo_reraise(exc, config=None, pos=None, additional=''):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += ' ' + additional
exc.args = (message,)
raise exc
def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
    # one would expect that calling vo_warn without config should not
# silence the warnings.
if config.get('verify', 'warn') != 'ignore':
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel+1)
def warn_unknown_attrs(element, attrs, config, pos, good_attr=[], stacklevel=1):
for attr in attrs:
if attr not in good_attr:
vo_warn(W48, (attr, element), config, pos, stacklevel=stacklevel+1)
_warning_pat = re.compile(
(r":?(?P<nline>[0-9?]+):(?P<nchar>[0-9?]+): " +
r"((?P<warning>[WE]\d+): )?(?P<rest>.*)$"))
def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
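
    A minimal sketch of the expected input format and a few of the
    returned fields::

        >>> w = parse_vowarning("x.xml:10:6: W01: Array uses commas")
        >>> w['number'], w['nline'], w['is_warning']
        (1, 10, True)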
"""
result = {}
match = _warning_pat.search(line)
if match:
result['warning'] = warning = match.group('warning')
if warning is not None:
result['is_warning'] = (warning[0].upper() == 'W')
result['is_exception'] = not result['is_warning']
result['number'] = int(match.group('warning')[1:])
result['doc_url'] = "io/votable/api_exceptions.html#{0}".format(
warning.lower())
else:
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = True
result['number'] = None
result['doc_url'] = None
try:
result['nline'] = int(match.group('nline'))
except ValueError:
result['nline'] = 0
try:
result['nchar'] = int(match.group('nchar'))
except ValueError:
result['nchar'] = 0
result['message'] = match.group('rest')
result['is_something'] = True
else:
result['warning'] = None
result['is_warning'] = False
result['is_exception'] = False
result['is_other'] = False
result['is_something'] = False
if not isinstance(line, str):
line = line.decode('utf-8')
result['message'] = line
return result
class VOWarning(AstropyWarning):
"""
The base class of all VO warnings and exceptions.
Handles the formatting of the message with a warning or exception
code, filename, line and column number.
"""
default_args = ()
message_template = ''
def __init__(self, args, config=None, pos=None):
if config is None:
config = {}
if not isinstance(args, tuple):
args = (args, )
msg = self.message_template.format(*args)
self.formatted_message = _format_message(
msg, self.__class__.__name__, config, pos)
Warning.__init__(self, self.formatted_message)
def __str__(self):
return self.formatted_message
@classmethod
def get_short_name(cls):
if len(cls.default_args):
return cls.message_template.format(*cls.default_args)
return cls.message_template
class VOTableChangeWarning(VOWarning, SyntaxWarning):
"""
A change has been made to the input XML file.
"""
class VOTableSpecWarning(VOWarning, SyntaxWarning):
"""
The input XML file violates the spec, but there is an obvious workaround.
"""
class UnimplementedWarning(VOWarning, SyntaxWarning):
"""
A feature of the VOTABLE_ spec is not implemented.
"""
class IOWarning(VOWarning, RuntimeWarning):
"""
A network or IO error occurred, but was recovered using the cache.
"""
class VOTableSpecError(VOWarning, ValueError):
"""
The input XML file violates the spec and there is no good workaround.
"""
class W01(VOTableSpecWarning):
"""
The VOTable spec states:
If a cell contains an array or complex number, it should be
encoded as multiple numbers separated by whitespace.
Many VOTable files in the wild use commas as a separator instead,
and ``astropy.io.votable`` supports this convention when not in
:ref:`pedantic-mode`.
``astropy.io.votable`` always outputs files using only spaces, regardless of
how they were input.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
"""
message_template = "Array uses commas rather than whitespace"
class W02(VOTableSpecWarning):
r"""
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
The VOTable 1.1 says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
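    For example, ``RA_MAIN`` and ``_flux.err`` are valid IDs, while
    ``2MASS_ID`` (leading digit) and ``flux err`` (embedded whitespace)
    are not.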
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <http://www.w3.org/TR/REC-xml/#NT-Name>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ('x', 'y')
class W03(VOTableChangeWarning):
"""
The VOTable 1.1 spec says the following about ``name`` vs. ``ID``
on ``FIELD`` and ``VALUE`` elements:
``ID`` and ``name`` attributes have a different role in
VOTable: the ``ID`` is meant as a *unique identifier* of an
element seen as a VOTable component, while the ``name`` is
meant for presentation purposes, and need not to be unique
throughout the VOTable document. The ``ID`` attribute is
therefore required in the elements which have to be
referenced, but in principle any element may have an ``ID``
attribute. ... In summary, the ``ID`` is different from the
``name`` attribute in that (a) the ``ID`` attribute is made
from a restricted character set, and must be unique throughout
a VOTable document whereas names are standard XML attributes
and need not be unique; and (b) there should be support in the
parsing software to look up references and extract the
relevant element with matching ``ID``.
It is further recommended in the VOTable 1.2 spec:
While the ``ID`` attribute has to be unique in a VOTable
document, the ``name`` attribute need not. It is however
recommended, as a good practice, to assign unique names within
a ``TABLE`` element. This recommendation means that, between a
``TABLE`` and its corresponding closing ``TABLE`` tag,
``name`` attributes of ``FIELD``, ``PARAM`` and optional
``GROUP`` elements should be all different.
Since ``astropy.io.votable`` requires a unique identifier for each of its
columns, ``ID`` is used for the column name when present.
However, when ``ID`` is not present, (since it is not required by
the specification) ``name`` is used instead. However, ``name``
must be cleansed by replacing invalid characters (such as
whitespace) with underscores.
.. note::
This warning does not indicate that the input file is invalid
with respect to the VOTable specification, only that the
column names in the record array may not match exactly the
``name`` attributes specified in the file.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Implicitly generating an ID from a name '{}' -> '{}'"
default_args = ('x', 'y')
class W04(VOTableSpecWarning):
"""
The ``content-type`` attribute must use MIME content-type syntax as
defined in `RFC 2046 <https://tools.ietf.org/html/rfc2046>`__.
The current check for validity is somewhat over-permissive.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "content-type '{}' must be a valid MIME content type"
default_args = ('x',)
class W05(VOTableSpecWarning):
"""
The attribute must be a valid URI as defined in `RFC 2396
<http://www.ietf.org/rfc/rfc2396.txt>`_.
"""
message_template = "'{}' is not a valid URI"
default_args = ('x',)
class W06(VOTableSpecWarning):
"""
This warning is emitted when a ``ucd`` attribute does not match
the syntax of a `unified content descriptor
<http://vizier.u-strasbg.fr/doc/UCD.htx>`__.
If the VOTable version is 1.2 or later, the UCD will also be
checked to ensure it conforms to the controlled vocabulary defined
by UCD1+.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:ucd>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:ucd>`__
"""
message_template = "Invalid UCD '{}': {}"
default_args = ('x', 'explanation')
class W07(VOTableSpecWarning):
"""
    An astroYear field is a Besselian or Julian year matching the
regular expression::
^[JB]?[0-9]+([.][0-9]*)?$
Defined in this XML Schema snippet::
<xs:simpleType name="astroYear">
<xs:restriction base="xs:token">
<xs:pattern value="[JB]?[0-9]+([.][0-9]*)?"/>
</xs:restriction>
</xs:simpleType>
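    For example, ``J2000``, ``B1950.5`` and ``2015.5`` are valid astroYear
    values, whereas ``2000 AD`` is not.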
"""
message_template = "Invalid astroYear in {}: '{}'"
default_args = ('x', 'y')
class W08(VOTableSpecWarning):
"""
    To avoid locale-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ('x',)
class W09(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
class W10(VOTableSpecWarning):
"""
The parser has encountered an element that does not exist in the
specification, or appears in an invalid context. Check the file
against the VOTable schema (with a tool such as `xmllint
<http://xmlsoft.org/xmllint.html>`__. If the file validates
against the schema, and you still receive this warning, this may
indicate a bug in ``astropy.io.votable``.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Unknown tag '{}'. Ignoring"
default_args = ('x',)
class W11(VOTableSpecWarning):
"""
Earlier versions of the VOTable specification used a ``gref``
attribute on the ``LINK`` element to specify a `GLU reference
<http://aladin.u-strasbg.fr/glu/>`__. New files should
specify a ``glu:`` protocol using the ``href`` attribute.
Since ``astropy.io.votable`` does not currently support GLU references, it
likewise does not automatically convert the ``gref`` attribute to
the new form.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:link>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:link>`__
"""
message_template = "The gref attribute on LINK is deprecated in VOTable 1.1"
class W12(VOTableChangeWarning):
"""
In order to name the columns of the Numpy record array, each
``FIELD`` element must have either an ``ID`` or ``name`` attribute
to derive a name from. Strictly speaking, according to the
VOTable schema, the ``name`` attribute is required. However, if
    ``name`` is not present but ``ID`` is, and ``verify`` is not ``'exception'``,
``astropy.io.votable`` will continue without a ``name`` defined.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = (
"'{}' element must have at least one of 'ID' or 'name' attributes")
default_args = ('x',)
class W13(VOTableSpecWarning):
"""
Some VOTable files in the wild use non-standard datatype names. These
are mapped to standard ones using the following mapping::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
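    For example (using a hypothetical non-standard name)::
        from astropy.io.votable import parse
        votable = parse("observations.xml",
                        datatype_mapping={"double precision": "double"})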
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' is not a valid VOTable datatype, should be '{}'"
default_args = ('x', 'y')
# W14: Deprecated
class W15(VOTableSpecWarning):
"""
The ``name`` attribute is required on every ``FIELD`` element.
However, many VOTable files in the wild omit it and provide only
    an ``ID`` instead. In this case, when ``verify`` is not ``'exception'``,
    ``astropy.io.votable`` will copy the ``ID`` attribute to a new ``name``
    attribute.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "{} element missing required 'name' attribute"
default_args = ('x',)
# W16: Deprecated
class W17(VOTableSpecWarning):
"""
A ``DESCRIPTION`` element can only appear once within its parent
element.
According to the schema, it may only occur once (`1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__)
However, it is a `proposed extension
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:addesc>`__
to VOTable 1.2.
"""
message_template = "{} element contains more than one DESCRIPTION element"
default_args = ('x',)
class W18(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = 'TABLE specified nrows={}, but table contains {} rows'
default_args = ('x', 'y')
class W19(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded FITS file. If ``verify`` is not
``'exception'``, the embedded FITS file will take precedence.
"""
message_template = (
'The fields defined in the VOTable do not match those in the ' +
'embedded FITS file')
class W20(VOTableSpecWarning):
"""
If no version number is explicitly given in the VOTable file, the
parser assumes it is written to the VOTable 1.1 specification.
"""
message_template = 'No version number specified in file. Assuming {}'
default_args = ('1.1',)
class W21(UnimplementedWarning):
"""
Unknown issues may arise using ``astropy.io.votable`` with VOTable files
from a version other than 1.1, 1.2 or 1.3.
"""
message_template = (
'astropy.io.votable is designed for VOTable version 1.1, 1.2 and 1.3, but ' +
'this file is {}')
default_args = ('x',)
class W22(VOTableSpecWarning):
"""
Version 1.0 of the VOTable specification used the ``DEFINITIONS``
element to define coordinate systems. Version 1.1 now uses
``COOSYS`` elements throughout the document.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:definitions>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:definitions>`__
"""
message_template = 'The DEFINITIONS element is deprecated in VOTable 1.1. Ignoring'
class W23(IOWarning):
"""
Raised when the VO service database can not be updated (possibly
due to a network outage). This is only a warning, since an older
    and possibly out-of-date VO service database was available
locally.
"""
message_template = "Unable to update service information for '{}'"
default_args = ('x',)
class W24(VOWarning, FutureWarning):
"""
The VO catalog database retrieved from the www is designed for a
    newer version of ``astropy.io.votable``. This may cause problems or limit
    functionality when performing service queries. Consider upgrading ``astropy.io.votable``
to the latest version.
"""
message_template = "The VO catalog database is for a later version of astropy.io.votable"
class W25(IOWarning):
"""
A VO service query failed due to a network error or malformed
arguments. Another alternative service may be attempted. If all
services fail, an exception will be raised.
"""
message_template = "'{}' failed with: {}"
default_args = ('service', '...')
class W26(VOTableSpecWarning):
"""
    The given element was not supported inside its parent element until the
    specified VOTable version; however, the version declared in the file is
    an earlier one. The element may not be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ('child', 'parent', 'X.X')
class W27(VOTableSpecWarning):
"""
The ``COOSYS`` element was deprecated in VOTABLE version 1.2 in
favor of a reference to the Space-Time Coordinate (STC) data
model (see `utype
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:utype>`__
and the IVOA note `referencing STC in VOTable
    <http://ivoa.net/Documents/latest/VOTableSTC.html>`__).
"""
message_template = "COOSYS deprecated in VOTable 1.2"
class W28(VOTableSpecWarning):
"""
    The given attribute was not supported on the given element until the
    specified VOTable version; however, the version declared in the file is
    an earlier one. The attribute may not be written out to the file.
"""
message_template = "'{}' on '{}' added in VOTable {}"
default_args = ('attribute', 'element', 'X.X')
class W29(VOTableSpecWarning):
"""
Some VOTable files specify their version number in the form "v1.0",
    whereas the only form supported by the spec is "1.0".
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "Version specified in non-standard form '{}'"
default_args = ('v1.0',)
class W30(VOTableSpecWarning):
"""
Some VOTable files write missing floating-point values in non-standard ways,
such as "null" and "-". If ``verify`` is not ``'exception'``, any
non-standard floating-point literals are treated as missing values.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid literal for float '{}'. Treating as empty."
default_args = ('x',)
class W31(VOTableSpecWarning):
"""
    Since NaNs cannot be represented in integer fields directly, a null
    value must be specified in the FIELD descriptor to support reading
    NaNs from the tabledata.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "NaN given in an integral field without a specified null value"
class W32(VOTableSpecWarning):
"""
Each field in a table must have a unique ID. If two or more fields
have the same ID, some will be renamed to ensure that all IDs are
unique.
From the VOTable 1.2 spec:
The ``ID`` and ``ref`` attributes are defined as XML types
``ID`` and ``IDREF`` respectively. This means that the
contents of ``ID`` is an identifier which must be unique
throughout a VOTable document, and that the contents of the
``ref`` attribute represents a reference to an identifier
which must exist in the VOTable document.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Duplicate ID '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W33(VOTableChangeWarning):
"""
Each field in a table must have a unique name. If two or more
fields have the same name, some will be renamed to ensure that all
names are unique.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "Column name '{}' renamed to '{}' to ensure uniqueness"
default_args = ('x', 'x_2')
class W34(VOTableSpecWarning):
"""
The attribute requires the value to be a valid XML token, as
defined by `XML 1.0
<http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Nmtoken>`__.
"""
message_template = "'{}' is an invalid token for attribute '{}'"
default_args = ('x', 'y')
class W35(VOTableSpecWarning):
"""
The ``name`` and ``value`` attributes are required on all ``INFO``
elements.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC32>`__
"""
message_template = "'{}' attribute required for INFO elements"
default_args = ('x',)
class W36(VOTableSpecWarning):
"""
If the field specifies a ``null`` value, that value must conform
to the given ``datatype``.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "null value '{}' does not match field datatype, setting to 0"
default_args = ('x',)
class W37(UnimplementedWarning):
"""
    The three data serialization formats defined in the VOTable specification
    and supported by ``astropy.io.votable`` are ``TABLEDATA``, ``BINARY`` and ``FITS``.
**References:** `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ('x',)
class W38(VOTableSpecWarning):
"""
The only encoding for local binary data supported by the VOTable
specification is base64.
"""
message_template = "Inline binary data must be base64 encoded, got '{}'"
default_args = ('x',)
class W39(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
class W40(VOTableSpecWarning):
"""
    This is a terrible hack to support Simple Image Access Protocol
    results from `archive.noao.edu <http://archive.noao.edu>`__. Those
    results declare a field for the coordinate projection type with
    datatype "double", even though it actually contains character data.
    The field is repaired to store character data so the file can be
    read, and this warning is raised when that happens.
"""
message_template = "'cprojection' datatype repaired"
class W41(VOTableSpecWarning):
"""
An XML namespace was specified on the ``VOTABLE`` element, but the
namespace does not match what is expected for a ``VOTABLE`` file.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
Some files in the wild set the namespace to the location of the
VOTable schema, which is not correct and will not pass some
validating parsers.
"""
message_template = (
"An XML namespace is specified, but is incorrect. Expected " +
"'{}', got '{}'")
default_args = ('x', 'y')
class W42(VOTableSpecWarning):
"""
The root element should specify a namespace.
The ``VOTABLE`` namespace is::
http://www.ivoa.net/xml/VOTable/vX.X
where "X.X" is the version number.
"""
message_template = "No XML namespace specified"
class W43(VOTableSpecWarning):
"""
    Referenced elements should be defined before the elements that refer
    to them. From the VOTable 1.2 spec:
        In VOTable 1.2, it is further recommended to place the ID
        attribute prior to referencing it whenever possible.
"""
message_template = "{} ref='{}' which has not already been defined"
default_args = ('element', 'x',)
class W44(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ('element',)
class W45(VOWarning, ValueError):
"""
The ``content-role`` attribute on the ``LINK`` element must be one of
the following::
query, hints, doc, location
And in VOTable 1.3, additionally::
type
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
`1.3
<http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#sec:link>`__
"""
message_template = "content-role attribute '{}' invalid"
default_args = ('x',)
class W46(VOTableSpecWarning):
"""
The given char or unicode string is too long for the specified
field length.
"""
message_template = "{} value is too long for specified length of {}"
default_args = ('char or unicode', 'x')
class W47(VOTableSpecWarning):
"""
If no arraysize is specified on a char field, the default of '1'
is implied, but this is rarely what is intended.
"""
message_template = "Missing arraysize indicates length 1"
class W48(VOTableSpecWarning):
"""
The attribute is not defined in the specification.
"""
message_template = "Unknown attribute '{}' on {}"
default_args = ('attribute', 'element')
class W49(VOTableSpecWarning):
"""
Prior to VOTable 1.3, the empty cell was illegal for integer
fields.
If a \"null\" value was specified for the cell, it will be used
for the value, otherwise, 0 will be used.
"""
message_template = "Empty cell illegal for integer fields."
class W50(VOTableSpecWarning):
"""
Invalid unit string as defined in the `Standards for Astronomical
Catalogues, Version 2.0
<http://cdsarc.u-strasbg.fr/doc/catstd-3.2.htx>`_.
Consider passing an explicit ``unit_format`` parameter if the units
in this file conform to another specification.
"""
message_template = "Invalid unit string '{}'"
default_args = ('x',)
class W51(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ('x', 'n-bit')
class W52(VOTableSpecWarning):
"""
The BINARY2 format was introduced in VOTable 1.3. It should
not be present in files marked as an earlier version.
"""
message_template = ("The BINARY2 format was introduced in VOTable 1.3, but "
"this file is declared as version '{}'")
default_args = ('1.2',)
class W53(VOTableSpecWarning):
"""
The VOTABLE element must contain at least one RESOURCE element.
"""
message_template = ("VOTABLE element must contain at least one RESOURCE element.")
default_args = ()
class E01(VOWarning, ValueError):
"""
The size specifier for a ``char`` or ``unicode`` field must be
only a number followed, optionally, by an asterisk.
Multi-dimensional size specifiers are not supported for these
datatypes.
Strings, which are defined as a set of characters, can be
represented in VOTable as a fixed- or variable-length array of
characters::
<FIELD name="unboundedString" datatype="char" arraysize="*"/>
A 1D array of strings can be represented as a 2D array of
    characters. Given the logic above, it is possible to define a
variable-length array of fixed-length strings, but not a
fixed-length array of variable-length strings.
"""
message_template = "Invalid size specifier '{}' for a {} field (in field '{}')"
default_args = ('x', 'char/unicode', 'y')
class E02(VOWarning, ValueError):
"""
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. " +
"Expected multiple of {}, got {}")
default_args = ('x', 'y')
class E03(VOWarning, ValueError):
"""
Complex numbers should be two values separated by whitespace.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "'{}' does not parse as a complex number"
default_args = ('x',)
class E04(VOWarning, ValueError):
"""
A ``bit`` array should be a string of '0's and '1's.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid bit value '{}'"
default_args = ('x',)
class E05(VOWarning, ValueError):
r"""
A ``boolean`` value should be one of the following strings (case
insensitive) in the ``TABLEDATA`` format::
'TRUE', 'FALSE', '1', '0', 'T', 'F', '\0', ' ', '?'
and in ``BINARY`` format::
'T', 'F', '1', '0', '\0', ' ', '?'
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Invalid boolean value '{}'"
default_args = ('x',)
class E06(VOWarning, ValueError):
"""
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
    these cases :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ('x', 'y')
# E07: Deprecated
class E08(VOWarning, ValueError):
"""
The ``type`` attribute on the ``VALUES`` element must be either
``legal`` or ``actual``.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "type must be 'legal' or 'actual', but is '{}'"
default_args = ('x',)
class E09(VOWarning, ValueError):
"""
The ``MIN``, ``MAX`` and ``OPTION`` elements must always have a
``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__
"""
message_template = "'{}' must have a value attribute"
default_args = ('x',)
class E10(VOWarning, ValueError):
"""
From VOTable 1.1 and later, ``FIELD`` and ``PARAM`` elements must have
a ``datatype`` field.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "'datatype' attribute required on all '{}' elements"
default_args = ('FIELD',)
class E11(VOWarning, ValueError):
"""
The precision attribute is meant to express the number of significant
digits, either as a number of decimal places (e.g. ``precision="F2"`` or
equivalently ``precision="2"`` to express 2 significant figures
after the decimal point), or as a number of significant figures
(e.g. ``precision="E5"`` indicates a relative precision of 10-5).
It is validated using the following regular expression::
[EF]?[1-9][0-9]*
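    For example, ``precision="2"``, ``precision="F2"`` and
    ``precision="E5"`` are valid, whereas ``precision="F0"`` is not.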
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "precision '{}' is invalid"
default_args = ('x',)
class E12(VOWarning, ValueError):
"""
The width attribute is meant to indicate to the application the
number of characters to be used for input or output of the
quantity.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:form>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:form>`__
"""
message_template = "width must be a positive integer, got '{}'"
default_args = ('x',)
class E13(VOWarning, ValueError):
r"""
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
    about 2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ('x',)
class E14(VOWarning, ValueError):
"""
All ``PARAM`` elements must have a ``value`` attribute.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:FIELD>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#elem:FIELD>`__
"""
message_template = "value attribute is required for all PARAM elements"
class E15(VOWarning, ValueError):
"""
All ``COOSYS`` elements must have an ``ID`` attribute.
Note that the VOTable 1.1 specification says this attribute is
optional, but its corresponding schema indicates it is required.
In VOTable 1.2, the ``COOSYS`` element is deprecated.
"""
message_template = "ID attribute is required for all COOSYS elements"
class E16(VOTableSpecWarning):
"""
The ``system`` attribute on the ``COOSYS`` element must be one of the
following::
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', 'geo_app'
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#elem:COOSYS>`__
"""
message_template = "Invalid system attribute '{}'"
default_args = ('x',)
class E17(VOWarning, ValueError):
"""
``extnum`` attribute must be a positive integer.
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "extnum must be a positive integer"
class E18(VOWarning, ValueError):
"""
The ``type`` attribute of the ``RESOURCE`` element must be one of
"results" or "meta".
**References**: `1.1
<http://www.ivoa.net/Documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC54>`__,
`1.2
<http://www.ivoa.net/Documents/VOTable/20091130/REC-VOTable-1.2.html#ToC58>`__
"""
message_template = "type must be 'results' or 'meta', not '{}'"
default_args = ('x',)
class E19(VOWarning, ValueError):
"""
Raised either when the file doesn't appear to be XML, or the root
element is not VOTABLE.
"""
message_template = "File does not appear to be a VOTABLE"
class E20(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ('x',)
class E21(VOWarning, ValueError):
"""
The table had *x* fields defined, but the data itself has only *y*
columns.
"""
message_template = "Data has fewer columns ({}) than are defined in the header ({})"
default_args = ('x', 'y')
def _get_warning_and_exception_classes(prefix):
classes = []
for key, val in globals().items():
if re.match(prefix + "[0-9]{2}", key):
classes.append((key, val))
classes.sort()
return classes
def _build_doc_string():
def generate_set(prefix):
classes = _get_warning_and_exception_classes(prefix)
out = io.StringIO()
for name, cls in classes:
out.write(".. _{}:\n\n".format(name))
msg = "{}: {}".format(cls.__name__, cls.get_short_name())
if not isinstance(msg, str):
msg = msg.decode('utf-8')
out.write(msg)
out.write('\n')
out.write('~' * len(msg))
out.write('\n\n')
doc = cls.__doc__
if not isinstance(doc, str):
doc = doc.decode('utf-8')
out.write(dedent(doc))
out.write('\n\n')
return out.getvalue()
warnings = generate_set('W')
exceptions = generate_set('E')
return {'warnings': warnings,
'exceptions': exceptions}
if __doc__ is not None:
__doc__ = __doc__.format(**_build_doc_string())
__all__.extend([x[0] for x in _get_warning_and_exception_classes('W')])
__all__.extend([x[0] for x in _get_warning_and_exception_classes('E')])
|
ac3be09491600673c291713f972fe2facf5ac847d05868e800a6717cda2c4e55 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsheader`` is a command line script based on astropy.io.fits for printing
the header(s) of one or more FITS file(s) to the standard output in a human-
readable format.
Example uses of fitsheader:
1. Print the header of all the HDUs of a .fits file::
$ fitsheader filename.fits
2. Print the header of the third and fifth HDU extension::
$ fitsheader --extension 3 --extension 5 filename.fits
3. Print the header of a named extension, e.g. select the HDU containing
keywords EXTNAME='SCI' and EXTVER='2'::
$ fitsheader --extension "SCI,2" filename.fits
4. Print only specific keywords::
$ fitsheader --keyword BITPIX --keyword NAXIS filename.fits
5. Print keywords NAXIS, NAXIS1, NAXIS2, etc using a wildcard::
$ fitsheader --keyword NAXIS* filename.fits
6. Dump the header keywords of all the files in the current directory into a
machine-readable csv file::
$ fitsheader --table ascii.csv *.fits > keywords.csv
7. Specify hierarchical keywords with the dotted or spaced notation::
$ fitsheader --keyword ESO.INS.ID filename.fits
$ fitsheader --keyword "ESO INS ID" filename.fits
8. Compare the headers of different FITS files, following ESO's ``fitsort``
format::
$ fitsheader --fitsort --extension 0 --keyword ESO.INS.ID *.fits
9. Same as above, sorting the output along a specified keyword::
$ fitsheader -f DATE-OBS -e 0 -k DATE-OBS -k ESO.INS.ID *.fits
Note that compressed images (HDUs of type
:class:`~astropy.io.fits.CompImageHDU`) really have two headers: a real
BINTABLE header to describe the compressed data, and a fake IMAGE header
representing the image that was compressed. Astropy returns the latter by
default. You must supply the ``--compressed`` option if you require the real
header that describes the compression.
With Astropy installed, please run ``fitsheader --help`` to see the full usage
documentation.
"""
import sys
import argparse
import numpy as np
from astropy.io import fits
from astropy import log
class ExtensionNotFoundException(Exception):
"""Raised if an HDU extension requested by the user does not exist."""
pass
class HeaderFormatter:
"""Class to format the header(s) of a FITS file for display by the
    `fitsheader` tool; essentially a wrapper around an `HDUList` object.
Example usage:
fmt = HeaderFormatter('/path/to/file.fits')
print(fmt.parse(extensions=[0, 3], keywords=['NAXIS', 'BITPIX']))
Parameters
----------
filename : str
Path to a single FITS file.
verbose : bool
Verbose flag, to show more information about missing extensions,
keywords, etc.
Raises
------
OSError
If `filename` does not exist or cannot be read.
"""
def __init__(self, filename, verbose=True):
self.filename = filename
self.verbose = verbose
self._hdulist = fits.open(filename)
def parse(self, extensions=None, keywords=None, compressed=False):
"""Returns the FITS file header(s) in a readable format.
Parameters
----------
extensions : list of int or str, optional
Format only specific HDU(s), identified by number or name.
The name can be composed of the "EXTNAME" or "EXTNAME,EXTVER"
keywords.
keywords : list of str, optional
Keywords for which the value(s) should be returned.
If not specified, then the entire header is returned.
compressed : boolean, optional
If True, shows the header describing the compression, rather than
the header obtained after decompression. (Affects FITS files
containing `CompImageHDU` extensions only.)
Returns
-------
formatted_header : str or astropy.table.Table
Traditional 80-char wide format in the case of `HeaderFormatter`;
an Astropy Table object in the case of `TableHeaderFormatter`.
"""
# `hdukeys` will hold the keys of the HDUList items to display
if extensions is None:
hdukeys = range(len(self._hdulist)) # Display all by default
else:
hdukeys = []
for ext in extensions:
try:
# HDU may be specified by number
hdukeys.append(int(ext))
except ValueError:
# The user can specify "EXTNAME" or "EXTNAME,EXTVER"
parts = ext.split(',')
if len(parts) > 1:
extname = ','.join(parts[0:-1])
extver = int(parts[-1])
hdukeys.append((extname, extver))
else:
hdukeys.append(ext)
# Having established which HDUs the user wants, we now format these:
return self._parse_internal(hdukeys, keywords, compressed)
def _parse_internal(self, hdukeys, keywords, compressed):
"""The meat of the formatting; in a separate method to allow overriding.
"""
result = []
for idx, hdu in enumerate(hdukeys):
try:
cards = self._get_cards(hdu, keywords, compressed)
except ExtensionNotFoundException:
continue
if idx > 0: # Separate HDUs by a blank line
result.append('\n')
result.append('# HDU {} in {}:\n'.format(hdu, self.filename))
for c in cards:
result.append('{}\n'.format(c))
return ''.join(result)
def _get_cards(self, hdukey, keywords, compressed):
"""Returns a list of `astropy.io.fits.card.Card` objects.
This function will return the desired header cards, taking into
account the user's preference to see the compressed or uncompressed
version.
Parameters
----------
hdukey : int or str
Key of a single HDU in the HDUList.
keywords : list of str, optional
Keywords for which the cards should be returned.
compressed : boolean, optional
If True, shows the header describing the compression.
Raises
------
ExtensionNotFoundException
If the hdukey does not correspond to an extension.
"""
# First we obtain the desired header
try:
if compressed:
# In the case of a compressed image, return the header before
# decompression (not the default behavior)
header = self._hdulist[hdukey]._header
else:
header = self._hdulist[hdukey].header
except (IndexError, KeyError):
message = '{0}: Extension {1} not found.'.format(self.filename,
hdukey)
if self.verbose:
log.warning(message)
raise ExtensionNotFoundException(message)
if not keywords: # return all cards
cards = header.cards
else: # specific keywords are requested
cards = []
for kw in keywords:
try:
crd = header.cards[kw]
if isinstance(crd, fits.card.Card): # Single card
cards.append(crd)
else: # Allow for wildcard access
cards.extend(crd)
except KeyError as e: # Keyword does not exist
if self.verbose:
log.warning('{filename} (HDU {hdukey}): '
'Keyword {kw} not found.'.format(
filename=self.filename,
hdukey=hdukey,
kw=kw))
return cards
def close(self):
self._hdulist.close()
class TableHeaderFormatter(HeaderFormatter):
"""Class to convert the header(s) of a FITS file into a Table object.
The table returned by the `parse` method will contain four columns:
filename, hdu, keyword, and value.
Subclassed from HeaderFormatter, which contains the meat of the formatting.
"""
def _parse_internal(self, hdukeys, keywords, compressed):
"""Method called by the parse method in the parent class."""
tablerows = []
for hdu in hdukeys:
try:
for card in self._get_cards(hdu, keywords, compressed):
tablerows.append({'filename': self.filename,
'hdu': hdu,
'keyword': card.keyword,
'value': str(card.value)})
except ExtensionNotFoundException:
pass
if tablerows:
from astropy import table
return table.Table(tablerows)
return None
def print_headers_traditional(args):
"""Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
for idx, filename in enumerate(args.filename): # support wildcards
if idx > 0 and not args.keywords:
print() # print a newline between different files
formatter = None
try:
formatter = HeaderFormatter(filename)
print(formatter.parse(args.extensions,
args.keywords,
args.compressed), end='')
except OSError as e:
log.error(str(e))
finally:
if formatter:
formatter.close()
def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions,
args.keywords,
args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table)
def print_headers_as_comparison(args):
"""Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
from astropy import table
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename, verbose=False)
tbl = formatter.parse(args.extensions,
args.keywords,
args.compressed)
if tbl:
# Remove empty keywords
tbl = tbl[np.where(tbl['keyword'] != '')]
else:
tbl = table.Table([[filename]], names=('filename',))
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
resulting_table = table.vstack(tables)
# If we obtained more than one hdu, merge hdu and keywords columns
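    # e.g. keyword NAXIS read from HDU 1 becomes a column named "1:NAXIS"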
hdus = resulting_table['hdu']
if np.ma.isMaskedArray(hdus):
hdus = hdus.compressed()
if len(np.unique(hdus)) > 1:
for tab in tables:
new_column = table.Column(
['{}:{}'.format(row['hdu'], row['keyword']) for row in tab])
tab.add_column(new_column, name='hdu+keyword')
keyword_column_name = 'hdu+keyword'
else:
keyword_column_name = 'keyword'
# Check how many hdus we are processing
final_tables = []
for tab in tables:
final_table = [table.Column([tab['filename'][0]], name='filename')]
if 'value' in tab.colnames:
for row in tab:
if row['keyword'] in ('COMMENT', 'HISTORY'):
continue
final_table.append(table.Column([row['value']],
name=row[keyword_column_name]))
final_tables.append(table.Table(final_table))
final_table = table.vstack(final_tables)
# Sort if requested
if args.fitsort is not True: # then it must be a keyword, therefore sort
final_table.sort(args.fitsort)
# Reorganise to keyword by columns
final_table.pprint(max_lines=-1, max_width=-1)
class KeywordAppendAction(argparse.Action):
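    """
    ``argparse`` action used for ``-k/--keyword``: collects the requested
    keywords, translating the dotted hierarchical notation (e.g.
    ``ESO.INS.ID``) into the space-separated form (``ESO INS ID``) used by
    `astropy.io.fits`, and skipping duplicates.
    """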
def __call__(self, parser, namespace, values, option_string=None):
keyword = values.replace('.', ' ')
if namespace.keywords is None:
namespace.keywords = []
if keyword not in namespace.keywords:
namespace.keywords.append(keyword)
def main(args=None):
"""This is the main function called by the `fitsheader` script."""
parser = argparse.ArgumentParser(
description=('Print the header(s) of a FITS file. '
'Optional arguments allow the desired extension(s), '
'keyword(s), and output format to be specified. '
'Note that in the case of a compressed image, '
'the decompressed header is shown by default.'))
parser.add_argument('-e', '--extension', metavar='HDU',
action='append', dest='extensions',
help='specify the extension by name or number; '
'this argument can be repeated '
'to select multiple extensions')
parser.add_argument('-k', '--keyword', metavar='KEYWORD',
action=KeywordAppendAction, dest='keywords',
help='specify a keyword; this argument can be '
'repeated to select multiple keywords; '
'also supports wildcards')
parser.add_argument('-t', '--table',
nargs='?', default=False, metavar='FORMAT',
help='print the header(s) in machine-readable table '
'format; the default format is '
'"ascii.fixed_width" (can be "ascii.csv", '
'"ascii.html", "ascii.latex", "fits", etc)')
    # Accept an optional sort keyword (e.g. `-f DATE-OBS`) as described in the
    # help text below; a bare `-f` still enables the fitsort-style output.
    parser.add_argument('-f', '--fitsort', nargs='?', default=False,
                        const=True, metavar='SORT_KEYWORD',
help='print the headers as a table with each unique '
'keyword in a given column (fitsort format); '
'if a SORT_KEYWORD is specified, the result will be '
'sorted along that keyword')
parser.add_argument('-c', '--compressed', action='store_true',
help='for compressed image data, '
'show the true header which describes '
'the compression rather than the data')
parser.add_argument('filename', nargs='+',
help='path to one or more files; '
'wildcards are supported')
args = parser.parse_args(args)
# If `--table` was used but no format specified,
# then use ascii.fixed_width by default
if args.table is None:
args.table = 'ascii.fixed_width'
# Now print the desired headers
try:
if args.table:
print_headers_as_table(args)
elif args.fitsort:
print_headers_as_comparison(args)
else:
print_headers_traditional(args)
except OSError as e:
# A 'Broken pipe' OSError may occur when stdout is closed prematurely,
# eg. when calling `fitsheader file.fits | head`. We let this pass.
pass
|
6a9acd02a0b335e9e427d205722042991d55ef749392e19a4540e2e93f8bf814 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .base import DELAYED, _ValidHDU, ExtensionHDU
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,
ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,
_AsciiColDefs, _FormatP, _FormatQ, _makep,
_parse_tformat, _scalar_to_format, _convert_format,
_cmp_recformats)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.decorators import deprecated_renamed_argument
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = ' '
lineterminator = '\n'
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(cls, columns, header=None, nrows=0, fill=False,
character_as_bytes=False, **kwargs):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
`numpy.ndarray` or `numpy.recarray` object, return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs`, or other
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object to instantiate the new HDU with. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If `False`,
            copy the data from input; undefined cells will still be filled with
zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
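        A minimal sketch (using the `BinTableHDU` subclass; the column
        names and values are illustrative only)::
            import numpy as np
            from astropy.io import fits
            c1 = fits.Column(name='flux', format='D', array=np.array([1.0, 2.0]))
            c2 = fits.Column(name='id', format='J', array=np.array([10, 20]))
            hdu = fits.BinTableHDU.from_columns([c1, c2])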
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill,
character_as_bytes=character_as_bytes)
hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs)
coldefs._add_listener(hdu)
return hdu
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
Table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (any(type(r) in (_FormatP, _FormatQ)
for r in columns._recformats) and
self._data_size is not None and
self._data_size > self._theap):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8,
self._data_offset)
data = raw_data[:self._theap].view(dtype=columns.dtype,
type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype,
self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case just
# return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder('>')
# hack to enable pseudo-uint support
data._uint = self._uint
        # pass the heap offset, needed for variable-length (P/Q format) columns
data._heapoffset = self._theap
data._heapsize = self._header['PCOUNT']
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# TODO: It's not clear that this actually works--it probably does not.
# This is what the code used to do before introduction of the
# notifier interface, but I don't believe it actually worked (there are
# several bug reports related to this...)
if self._data_loaded:
del self.data
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# For now this doesn't do anything fancy--it just deletes the data
# attribute so that it is forced to be recreated again. It doesn't
# change anything on the existing data recarray (this is also how this
# worked before introducing the notifier interface)
if self._data_loaded:
del self.data
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError('No header to setup HDU.')
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
('XTENSION', '', ''),
('BITPIX', 8, 'array data type'),
('NAXIS', 2, 'number of array dimensions'),
('NAXIS1', 0, 'length of dimension 1'),
('NAXIS2', 0, 'length of dimension 2'),
('PCOUNT', 0, 'number of group parameters'),
('GCOUNT', 1, 'number of groups'),
('TFIELDS', 0, 'number of table fields')]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ', '.join(x + 'n' for x in sorted(future_ignore))
warnings.warn("The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {0}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys), AstropyDeprecationWarning)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header['NAXIS1'] = self.data._raw_itemsize
self._header['NAXIS2'] = self.data.shape[0]
self._header['TFIELDS'] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
if not (isinstance(self._header[0], str) and
self._header[0].rstrip() == self._extension):
self._header[0] = (self._extension, self._ext_comment)
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ','.join(self.columns._recformats)
data = np.rec.array(None, formats=formats,
names=self.columns.names,
shape=0)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
                # table and this is a binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
self.__dict__['data'] = data
self.columns = self.data.columns
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get('NAXIS2', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
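        # Offset of the heap (used by variable-length array columns) from the
        # start of the data: the THEAP keyword if present, otherwise
        # immediately after the main table (NAXIS1 * NAXIS2 bytes).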
size = self._header['NAXIS1'] * self._header['NAXIS2']
return self._header.get('THEAP', size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS')
self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1')
self._header.set('TFIELDS', len(self.columns), after='GCOUNT')
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(),
header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(
update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header['TFIELDS'] = len(self.data._coldefs)
self._header['NAXIS2'] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
heapstart = self._header.get('THEAP', tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header['PCOUNT'] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat,
max=_max)
self._header['TFORM' + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option='warn'):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs)
self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs)
self.req_cards('TFIELDS', 7,
lambda v: (_is_int(v) and v >= 0 and v <= 999), 0,
option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TFORM' + str(idx + 1), None, None, None, option,
errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
shape, format = (), ''
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
shape = ()
nrows = self._header['NAXIS2']
ncols = self._header['TFIELDS']
format = ', '.join([self._header['TFORM' + str(j + 1)]
for j in range(ncols)])
format = '[{}]'.format(format)
dims = "{}R x {}C".format(nrows, ncols)
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(self, column, col_idx, attr,
old_value, new_value):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
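        # (e.g. for attr='unit' on the third column, base_keyword is 'TUNIT'
        # and keyword is 'TUNIT3')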
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(before_keyword, (keyword, new_value),
after=True)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword,
(keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
        Otherwise, keywords for all columns are cleared.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
continue
num = int(match.group('num')) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword,
num))
# First delete
rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0),
reverse=True)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value,
old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if 'TFIELDS' in self._header:
self._header['TFIELDS'] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
        The ver of the HDU; it will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
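    Examples
    --------
    A minimal sketch of building an ASCII table extension from column
    definitions (the column name and format below are purely illustrative):

    >>> from astropy.io import fits
    >>> col = fits.Column(name='flux', format='E12.4', array=[1.0, 2.5])
    >>> hdu = fits.TableHDU.from_columns([col], name='FLUXES')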
"""
_extension = 'TABLE'
_ext_comment = 'ASCII table extension'
_padding_byte = ' '
_columns_type = _AsciiColDefs
__format_RE = re.compile(
r'(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
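    # For example, the ASCII-table format string 'E12.4' is parsed by the
    # regular expression above into code='E', width='12', prec='4'.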
def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False):
super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
        names = list(columns.names)
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError("Duplicate field names: {}".format(dup))
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = 'S' + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header['NAXIS1'] > itemsize:
data_type = 'S' + str(columns.spans[idx] +
self._header['NAXIS1'] - itemsize)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b' ',
dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option='warn'):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option,
errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
        The ver of the HDU; it will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
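    Examples
    --------
    A minimal sketch of the two common construction paths (column and
    extension names below are purely illustrative):

    >>> import numpy as np
    >>> from astropy.io import fits
    >>> from astropy.table import Table
    >>> hdu = fits.BinTableHDU(Table({'a': [1, 2, 3]}), name='EVENTS')
    >>> col = fits.Column(name='a', format='J', array=np.array([1, 2, 3]))
    >>> hdu2 = fits.BinTableHDU.from_columns([col])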
"""
_extension = 'BINTABLE'
_ext_comment = 'binary table extension'
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(data, header, name=name, uint=uint, ver=ver,
character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return (card.keyword == 'XTENSION' and
xtension in (cls._extension, 'A3DTABLE'))
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
            # contribute to the checksum)
# TODO: The following code may no longer be necessary since it is
# now possible to get a pointer directly to the heap data as a
# whole. That said, it is possible for the heap section to contain
# data that is not actually pointed to by the table (i.e. garbage;
# this *shouldn't* happen but it is not disallowed either)--need to
# double check whether or not the checksum should include such
# garbage
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
            # write out the heap of variable length array columns; this has
# to be done after the "regular" data is written (above)
fileobj.write((data._gap * '\0').encode('ascii'))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx],
_FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
if not fileobj.simulateonly:
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
if not fileobj.simulateonly:
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx)
for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
        # `for row in self.data:`), so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == 'U':
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i+1:])
item = np.char.encode(item, 'ascii')
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j+1:])
# Fix padding problem (see #5296).
padding = '\x00'*(field_width - item_length)
fileobj.write(padding.encode('ascii'))
_tdump_file_format = textwrap.dedent("""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
              This format does *not* support variable length arrays that use
              the 'Q' format, due to ambiguities that are difficult to
              overcome. In practice this means the file format cannot support
              VLA columns in tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
          eight 16-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
""")
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : file path, file object or file-like object, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : file path, file object or file-like object, optional
            Output column definitions file. The default is `None`; no
column definitions output is produced.
hfile : file path, file object or file-like object, optional
            Output header parameters file. The default is `None`;
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
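        Examples
        --------
        A minimal sketch; the file names are hypothetical and a writable
        working directory is assumed:

        >>> hdu.dump('tab_data.txt', 'tab_cols.txt', 'tab_head.txt',
        ...          overwrite=True)  # doctest: +SKIP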
"""
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, str):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(' '.join(["File '{}' already exists.".format(f)
for f in exist]))
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep='\n', endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace('\n', '\n ')
def load(cls, datafile, cdfile=None, hfile=None, replace=False,
header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
        When the header parameters file is absent, the header is taken from the
        ``header`` argument (or is empty if that is also omitted); when the
        column definitions file is absent, the column definitions are inferred
        from the data file itself (though relying on inference is not
        recommended).
Parameters
----------
datafile : file path, file object or file-like object
Input data file containing the table data in ASCII format.
cdfile : file path, file object, file-like object, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : file path, file object, file-like object, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this objects header.
replace : bool
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : Header object
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
            values not present in this Header, unless ``replace=True``, in which case
this Header's values are completely replaced with the values from
hfile.
Notes
-----
        The primary use for the `load` method is to allow the input of table
        data and parameters from ASCII files that were edited in a standard
        text editor. The `dump` method can be used to create the initial ASCII
files.
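        Examples
        --------
        A minimal sketch re-creating an HDU from files previously written by
        `dump` (the file names are hypothetical):

        >>> hdu = BinTableHDU.load('tab_data.txt', cdfile='tab_cols.txt',
        ...                        hfile='tab_head.txt')  # doctest: +SKIP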
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(Header.fromtextfile(hfile), update=True,
update_first=True)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace('\n', '\n ')
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + '.txt'
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == 'S':
itemsize = int(format[1:])
return '{:{size}}'.format(val, size=itemsize)
elif format in np.typecodes['AllInteger']:
# output integer
return '{:21d}'.format(val)
elif format in np.typecodes['Complex']:
return '{:21.15g}+{:.15g}j'.format(val.real, val.imag)
elif format in np.typecodes['Float']:
# output floating point
return '{:#21.15g}'.format(val)
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append('VLA_Length=')
line.append('{:21d}'.format(len(row[column.name])))
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == 'V':
array_format = dtype.base.char
if array_format == 'S':
array_format += str(dtype.itemsize)
if dtype.char == 'V':
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name],
array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ['disp', 'unit', 'dim', 'null', 'bscale', 'bzero']
line += ['{:16s}'.format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)]
fileobj.write(' '.join(line))
fileobj.write('\n')
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == 'VLA_Length=':
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),
nrows=nrows, fill=True)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)):]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY['L']:
return bool(int(val))
elif recformats[col] == FITS2NUMPY['M']:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == 'VLA_Length=':
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [format_value(col, val)
for val in line[slice_]]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ['name', 'format', 'disp', 'unit', 'dim']:
kwargs[key] = words.pop(0).replace('""', '')
for key in ['null', 'bscale', 'bzero']:
word = words.pop(0).replace('""', '')
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
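    A minimal usage sketch (``tbdata`` stands for any ``FITS_rec`` instance)::

        with _binary_table_byte_swap(tbdata) as swapped:
            # All numeric columns are temporarily big-endian here, so the
            # raw bytes can be checksummed or written out directly.
            raw = swapped.view(type=np.ndarray, dtype=np.ubyte)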
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
        # the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (not isinstance(c, chararray.chararray) and
c.itemsize > 1 and c.dtype.str[0] in swap_types):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({'names': names,
'formats': formats,
'offsets': offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
|
098acd4f73533f5cda790ee3fb3edf2d6ad0dbb2908071595492050948eb1111 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import datetime
import os
import sys
import warnings
from contextlib import suppress
from inspect import signature, Parameter
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits.file import _File
from astropy.io.fits.header import (Header, _BasicHeader, _pad_length,
_DelayedHeader)
from astropy.io.fits.util import (_is_int, _is_pseudo_unsigned, _unsigned_zero,
itersubclasses, decode_ascii, _get_array_mmap, first,
_free_space_check, _extract_number)
from astropy.io.fits.verify import _Verify, _ErrList
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.decorators import deprecated_renamed_argument
class _Delayed:
pass
DELAYED = _Delayed()
BITPIX2DTYPE = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64',
-32: 'float32', -64: 'float64'}
"""Maps FITS BITPIX values to Numpy dtype names."""
DTYPE2BITPIX = {'uint8': 8, 'int16': 16, 'uint16': 16, 'int32': 32,
'uint32': 32, 'int64': 64, 'uint64': 64, 'float32': -32,
'float64': -64}
"""
Maps Numpy dtype names to FITS BITPIX values (this includes unsigned
integers, with the assumption that the pseudo-unsigned integer convention
will be used in this case).
"""
class InvalidHDUException(Exception):
"""
A custom exception class used mainly to signal to _BaseHDU.__new__ that
an HDU cannot possibly be considered valid, and must be assumed to be
corrupted.
"""
def _hdu_class_from_header(cls, header):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header.
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (c.__module__.startswith('astropy.io.fits.') or
c in cls._hdu_registry):
continue
if c.match_header(header):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
'An exception occurred matching an HDU header to the '
'appropriate HDU type: {0}'.format(exc),
AstropyUserWarning)
warnings.warn('The HDU will be treated as corrupted.',
AstropyUserWarning)
klass = _CorruptedHDU
del exc
break
return klass
class _BaseHDUMeta(type):
def __init__(cls, name, bases, members):
# The sole purpose of this metaclass right now is to add the same
# data.deleter to all HDUs with a data property.
# It's unfortunate, but there's otherwise no straightforward way
# that a property can inherit setters/deleters of the property of the
# same name on base classes
if 'data' in members:
data_prop = members['data']
if (isinstance(data_prop, (lazyproperty, property)) and
data_prop.fdel is None):
# Don't do anything if the class has already explicitly
# set the deleter for its data property
def data(self):
# The deleter
if self._file is not None and self._data_loaded:
data_refcount = sys.getrefcount(self.data)
# Manually delete *now* so that FITS_rec.__del__
# cleanup can happen if applicable
del self.__dict__['data']
# Don't even do this unless the *only* reference to the
# .data array was the one we're deleting by deleting
# this attribute; if any other references to the array
# are hanging around (perhaps the user ran ``data =
# hdu.data``) don't even consider this:
if data_refcount == 2:
self._file._maybe_close_mmap()
setattr(cls, 'data', data_prop.deleter(data))
# TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that
# matter)
class _BaseHDU(metaclass=_BaseHDUMeta):
"""Base class for all HDU (header data unit) classes."""
_hdu_registry = set()
# This HDU type is part of the FITS standard
_standard = True
# Byte to use for padding out blocks
_padding_byte = '\x00'
_default_name = ''
# _header uses a descriptor to delay the loading of the fits.Header object
# until it is necessary.
_header = _DelayedHeader()
def __init__(self, data=None, header=None, *args, **kwargs):
if header is None:
header = Header()
self._header = header
self._header_str = None
self._file = None
self._buffer = None
self._header_offset = None
self._data_offset = None
self._data_size = None
# This internal variable is used to track whether the data attribute
# still points to the same data array as when the HDU was originally
# created (this does not track whether the data is actually the same
# content-wise)
self._data_replaced = False
self._data_needs_rescale = False
self._new = True
self._output_checksum = False
if 'DATASUM' in self._header and 'CHECKSUM' not in self._header:
self._output_checksum = 'datasum'
elif 'CHECKSUM' in self._header:
self._output_checksum = True
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
return str(self._header.get('EXTNAME', self._default_name))
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if 'EXTNAME' in self._header:
self._header['EXTNAME'] = value
else:
self._header['EXTNAME'] = (value, 'extension name')
@property
def ver(self):
return self._header.get('EXTVER', 1)
@ver.setter
def ver(self, value):
if not _is_int(value):
raise TypeError("'ver' attribute must be an integer")
if 'EXTVER' in self._header:
self._header['EXTVER'] = value
else:
self._header['EXTVER'] = (value, 'extension value')
@property
def level(self):
return self._header.get('EXTLEVEL', 1)
@level.setter
def level(self, value):
if not _is_int(value):
raise TypeError("'level' attribute must be an integer")
if 'EXTLEVEL' in self._header:
self._header['EXTLEVEL'] = value
else:
self._header['EXTLEVEL'] = (value, 'extension level')
@property
def is_image(self):
return (
self.name == 'PRIMARY' or
('XTENSION' in self._header and
(self._header['XTENSION'] == 'IMAGE' or
(self._header['XTENSION'] == 'BINTABLE' and
'ZIMAGE' in self._header and self._header['ZIMAGE'] is True))))
@property
def _data_loaded(self):
return ('data' in self.__dict__ and self.data is not DELAYED)
@property
def _has_data(self):
return self._data_loaded and self.data is not None
@classmethod
def register_hdu(cls, hducls):
cls._hdu_registry.add(hducls)
@classmethod
def unregister_hdu(cls, hducls):
if hducls in cls._hdu_registry:
cls._hdu_registry.remove(hducls)
@classmethod
def match_header(cls, header):
raise NotImplementedError
@classmethod
def fromstring(cls, data, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Creates a new HDU object of the appropriate type from a string
containing the HDU's entire header and, optionally, its data.
Note: When creating a new HDU from a string without a backing file
object, the data of that HDU may be read-only. It depends on whether
the underlying string was an immutable Python str/bytes object, or some
kind of read-write memory buffer such as a `memoryview`.
Parameters
----------
data : str, bytearray, memoryview, ndarray
A byte string containing the HDU's header and data.
checksum : bool, optional
Check the HDU's checksum and/or datasum.
ignore_missing_end : bool, optional
Ignore a missing end card in the header data. Note that without the
            end card the end of the header may be ambiguous and result in a
            corrupt HDU. In this case the assumption is that the first 2880-byte
block that does not begin with valid FITS header data is the
beginning of the data.
kwargs : optional
May consist of additional keyword arguments specific to an HDU
type--these correspond to keywords recognized by the constructors of
different HDU classes such as `PrimaryHDU`, `ImageHDU`, or
`BinTableHDU`. Any unrecognized keyword arguments are simply
ignored.
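        Examples
        --------
        A minimal sketch, assuming ``buf`` already holds the complete raw
        bytes (header plus data) of a single HDU:

        >>> hdu = _BaseHDU.fromstring(buf)  # doctest: +SKIP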
"""
return cls._readfrom_internal(data, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
@classmethod
def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Read the HDU from a file. Normally an HDU should be opened with
:func:`open` which reads the entire HDU list in a FITS file. But this
method is still provided for symmetry with :func:`writeto`.
Parameters
----------
fileobj : file object or file-like object
Input FITS file. The file's seek pointer is assumed to be at the
beginning of the HDU.
checksum : bool
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card
values (when present in the HDU header) match the header and data
of all HDU's in the file.
ignore_missing_end : bool
Do not issue an exception when opening a file that is missing an
``END`` card in the last header.
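        Examples
        --------
        A minimal sketch; ``table.fits`` is a hypothetical file and the
        example assumes its primary header occupies a single 2880-byte block,
        so the next HDU starts at offset 2880:

        >>> from astropy.io.fits.file import _File
        >>> fileobj = _File('table.fits')     # doctest: +SKIP
        >>> fileobj.seek(2880)                # doctest: +SKIP
        >>> hdu = _BaseHDU.readfrom(fileobj)  # doctest: +SKIP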
"""
# TODO: Figure out a way to make it possible for the _File
# constructor to be a noop if the argument is already a _File
if not isinstance(fileobj, _File):
fileobj = _File(fileobj)
hdu = cls._readfrom_internal(fileobj, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
# If the checksum had to be checked the data may have already been read
# from the file, in which case we don't want to seek relative
fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET)
return hdu
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the HDU to a new file. This is a convenience method to
provide a user easier output interface if only one HDU needs
to be written to a file.
Parameters
----------
name : file path, file object or file-like object
Output FITS file. If the file object is already opened, it must
be opened in a writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the header of the HDU when written to the file.
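        Examples
        --------
        A minimal sketch (the output file name is hypothetical):

        >>> hdu.writeto('single_hdu.fits', overwrite=True)  # doctest: +SKIP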
"""
from .hdulist import HDUList
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
@classmethod
def _from_data(cls, data, header, **kwargs):
"""
Instantiate the HDU object after guessing the HDU class from the
FITS Header.
"""
klass = _hdu_class_from_header(cls, header)
return klass(data=data, header=header, **kwargs)
@classmethod
def _readfrom_internal(cls, data, header=None, checksum=False,
ignore_missing_end=False, **kwargs):
"""
Provides the bulk of the internal implementation for readfrom and
fromstring.
For some special cases, supports using a header that was already
created, and just using the input data for the actual array data.
"""
hdu_buffer = None
hdu_fileobj = None
header_offset = 0
if isinstance(data, _File):
if header is None:
header_offset = data.tell()
try:
# First we try to read the header with the fast parser
# from _BasicHeader, which will read only the standard
# 8 character keywords to get the structural keywords
# that are needed to build the HDU object.
header_str, header = _BasicHeader.fromfile(data)
except Exception:
# If the fast header parsing failed, then fallback to
# the classic Header parser, which has better support
# and reporting for the various issues that can be found
# in the wild.
data.seek(header_offset)
header = Header.fromfile(data,
endcard=not ignore_missing_end)
hdu_fileobj = data
data_offset = data.tell() # *after* reading the header
else:
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {!r} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
.format(data))
if header is None:
def block_iter(nbytes):
idx = 0
while idx < len(data):
yield data[idx:idx + nbytes]
idx += nbytes
header_str, header = Header._from_blocks(
block_iter, True, '', not ignore_missing_end, True)
if len(data) > len(header_str):
hdu_buffer = data
elif data:
hdu_buffer = data
header_offset = 0
data_offset = len(header_str)
# Determine the appropriate arguments to pass to the constructor from
# self._kwargs. self._kwargs contains any number of optional arguments
# that may or may not be valid depending on the HDU type
cls = _hdu_class_from_header(cls, header)
sig = signature(cls.__init__)
new_kwargs = kwargs.copy()
if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()):
# If __init__ accepts arbitrary keyword arguments, then we can go
# ahead and pass all keyword arguments; otherwise we need to delete
# any that are invalid
for key in kwargs:
if key not in sig.parameters:
del new_kwargs[key]
hdu = cls(data=DELAYED, header=header, **new_kwargs)
# One of these may be None, depending on whether the data came from a
# file or a string buffer--later this will be further abstracted
hdu._file = hdu_fileobj
hdu._buffer = hdu_buffer
hdu._header_offset = header_offset # beginning of the header area
hdu._data_offset = data_offset # beginning of the data area
# data area size, including padding
size = hdu.size
hdu._data_size = size + _pad_length(size)
if isinstance(hdu._header, _BasicHeader):
# Delete the temporary _BasicHeader.
# We need to do this before an eventual checksum computation,
# since it needs to modify temporarily the header
#
# The header string is stored in the HDU._header_str attribute,
# so that it can be used directly when we need to create the
# classic Header object, without having to parse again the file.
del hdu._header
hdu._header_str = header_str
# Checksums are not checked on invalid HDU types
if checksum and checksum != 'remove' and isinstance(hdu, _ValidHDU):
hdu._verify_checksum_datasum()
return hdu
def _get_raw_data(self, shape, code, offset):
"""
Return raw array from either the HDU's memory buffer or underlying
file.
"""
if isinstance(shape, int):
shape = (shape,)
if self._buffer:
return np.ndarray(shape, dtype=code, buffer=self._buffer,
offset=offset)
elif self._file:
return self._file.readarray(offset=offset, dtype=code, shape=shape)
else:
return None
# TODO: Rework checksum handling so that it's not necessary to add a
# checksum argument here
# TODO: The BaseHDU class shouldn't even handle checksums since they're
# only implemented on _ValidHDU...
def _prewriteto(self, checksum=False, inplace=False):
self._update_uint_scale_keywords()
# Handle checksum
self._update_checksum(checksum)
def _update_uint_scale_keywords(self):
"""
If the data is unsigned int 16, 32, or 64 add BSCALE/BZERO cards to
header.
"""
if (self._has_data and self._standard and
_is_pseudo_unsigned(self.data.dtype)):
# CompImageHDUs need TFIELDS immediately after GCOUNT,
# so BSCALE has to go after TFIELDS if it exists.
if 'TFIELDS' in self._header:
self._header.set('BSCALE', 1, after='TFIELDS')
elif 'GCOUNT' in self._header:
self._header.set('BSCALE', 1, after='GCOUNT')
else:
self._header.set('BSCALE', 1)
self._header.set('BZERO', _unsigned_zero(self.data.dtype),
after='BSCALE')
def _update_checksum(self, checksum, checksum_keyword='CHECKSUM',
datasum_keyword='DATASUM'):
"""Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or
keywords with equivalent semantics given by the ``checksum_keyword``
and ``datasum_keyword`` arguments--see for example ``CompImageHDU``
for an example of why this might need to be overridden).
"""
# If the data is loaded it isn't necessarily 'modified', but we have no
# way of knowing for sure
modified = self._header._modified or self._data_loaded
if checksum == 'remove':
if checksum_keyword in self._header:
del self._header[checksum_keyword]
if datasum_keyword in self._header:
del self._header[datasum_keyword]
elif (modified or self._new or
(checksum and ('CHECKSUM' not in self._header or
'DATASUM' not in self._header or
not self._checksum_valid or
not self._datasum_valid))):
if checksum == 'datasum':
self.add_datasum(datasum_keyword=datasum_keyword)
elif checksum:
self.add_checksum(checksum_keyword=checksum_keyword,
datasum_keyword=datasum_keyword)
def _postwriteto(self):
# If data is unsigned integer 16, 32 or 64, remove the
# BSCALE/BZERO cards
if (self._has_data and self._standard and
_is_pseudo_unsigned(self.data.dtype)):
for keyword in ('BSCALE', 'BZERO'):
with suppress(KeyError):
del self._header[keyword]
def _writeheader(self, fileobj):
offset = 0
if not fileobj.simulateonly:
with suppress(AttributeError, OSError):
offset = fileobj.tell()
self._header.tofile(fileobj)
try:
size = fileobj.tell() - offset
except (AttributeError, OSError):
size = len(str(self._header))
else:
size = len(str(self._header))
return offset, size
def _writedata(self, fileobj):
# TODO: A lot of the simulateonly stuff should be moved back into the
# _File class--basically it should turn write and flush into a noop
offset = 0
size = 0
if not fileobj.simulateonly:
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self._data_loaded or self._data_needs_rescale:
if self.data is not None:
size += self._writedata_internal(fileobj)
# pad the FITS data block
if size > 0:
padding = _pad_length(size) * self._padding_byte
# TODO: Not that this is ever likely, but if for some odd
# reason _padding_byte is > 0x80 this will fail; but really if
# somebody's custom fits format is doing that, they're doing it
# wrong and should be reprimanded harshly.
fileobj.write(padding.encode('ascii'))
size += len(padding)
else:
            # The data has not been modified or does not need to be
# rescaled, so it can be copied, unmodified, directly from an
# existing file or buffer
size += self._writedata_direct_copy(fileobj)
# flush, to make sure the content is written
if not fileobj.simulateonly:
fileobj.flush()
# return both the location and the size of the data area
return offset, size
def _writedata_internal(self, fileobj):
"""
The beginning and end of most _writedata() implementations are the
same, but the details of writing the data array itself can vary between
HDU types, so that should be implemented in this method.
Should return the size in bytes of the data written.
"""
if not fileobj.simulateonly:
fileobj.writearray(self.data)
return self.data.size * self.data.itemsize
def _writedata_direct_copy(self, fileobj):
"""Copies the data directly from one file/buffer to the new file.
For now this is handled by loading the raw data from the existing data
(including any padding) via a memory map or from an already in-memory
buffer and using Numpy's existing file-writing facilities to write to
the new file.
If this proves too slow a more direct approach may be used.
"""
raw = self._get_raw_data(self._data_size, 'ubyte', self._data_offset)
if raw is not None:
fileobj.writearray(raw)
return raw.nbytes
else:
return 0
# TODO: This is the start of moving HDU writing out of the _File class;
# Though right now this is an internal private method (though still used by
# HDUList, eventually the plan is to have this be moved into writeto()
# somehow...
def _writeto(self, fileobj, inplace=False, copy=False):
try:
dirname = os.path.dirname(fileobj._file.name)
except (AttributeError, TypeError):
dirname = None
with _free_space_check(self, dirname):
self._writeto_internal(fileobj, inplace, copy)
def _writeto_internal(self, fileobj, inplace, copy):
# For now fileobj is assumed to be a _File object
if not inplace or self._new:
header_offset, _ = self._writeheader(fileobj)
data_offset, data_size = self._writedata(fileobj)
# Set the various data location attributes on newly-written HDUs
if self._new:
self._header_offset = header_offset
self._data_offset = data_offset
self._data_size = data_size
return
hdrloc = self._header_offset
hdrsize = self._data_offset - self._header_offset
datloc = self._data_offset
datsize = self._data_size
if self._header._modified:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# This should update hdrloc with the header location in the new file
hdrloc, hdrsize = self._writeheader(fileobj)
# If the data is to be written below with self._writedata, that
# will also properly update the data location; but it should be
# updated here too
datloc = hdrloc + hdrsize
elif copy:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# Before writing, update the hdrloc with the current file position,
# which is the hdrloc for the new file
hdrloc = fileobj.tell()
fileobj.write(self._file.read(hdrsize))
# The header size is unchanged, but the data location may be
# different from before depending on if previous HDUs were resized
datloc = fileobj.tell()
if self._data_loaded:
if self.data is not None:
# Seek through the array's bases for a memmap'd array; we
# can't rely on the _File object to give us this info since
# the user may have replaced the previous mmap'd array
if copy or self._data_replaced:
# Of course, if we're copying the data to a new file
# we don't care about flushing the original mmap;
# instead just read it into the new file
array_mmap = None
else:
array_mmap = _get_array_mmap(self.data)
if array_mmap is not None:
array_mmap.flush()
else:
self._file.seek(self._data_offset)
datloc, datsize = self._writedata(fileobj)
elif copy:
datsize = self._writedata_direct_copy(fileobj)
self._header_offset = hdrloc
self._data_offset = datloc
self._data_size = datsize
self._data_replaced = False
def _close(self, closed=True):
# If the data was mmap'd, close the underlying mmap (this will
# prevent any future access to the .data attribute if there are
# no other references to it; if there are other references then
# it is up to the user to clean those up).
if (closed and self._data_loaded and
_get_array_mmap(self.data) is not None):
del self.data
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
# For convenience...
# TODO: register_hdu could be made into a class decorator which would be pretty
# cool, but only once 2.6 support is dropped.
register_hdu = _BaseHDU.register_hdu
unregister_hdu = _BaseHDU.unregister_hdu
class _CorruptedHDU(_BaseHDU):
"""
A Corrupted HDU class.
This class is used when one or more mandatory `Card`s are
corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or
``END`` cards. A corrupted HDU usually means that the data size
cannot be calculated or the ``END`` card is not found. In the case
of a missing ``END`` card, the `Header` may also contain the binary
data.
.. note::
In future, it may be possible to decipher where the last block
of the `Header` ends, but this task may be difficult when the
extension is a `TableHDU` containing ASCII data.
"""
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
# Note: On compressed files this might report a negative size; but the
# file is corrupt anyways so I'm not too worried about it.
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _summary(self):
return (self.name, self.ver, 'CorruptedHDU')
def verify(self):
pass
class _NonstandardHDU(_BaseHDU, _Verify):
"""
A Non-standard HDU class.
This class is used for a Primary HDU when the ``SIMPLE`` Card has
a value of `False`. A non-standard HDU comes from a file that
resembles a FITS file but departs from the standards in some
significant way. One example would be files where the numbers are
in the DEC VAX internal storage format rather than the standard
FITS most significant byte first. The header for this HDU should
be valid. The data for this HDU is read from the file as a byte
stream that begins at the first byte after the header ``END`` card
and continues until the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any HDU that has the 'SIMPLE' keyword but is not a standard
Primary or Groups HDU.
"""
# The SIMPLE keyword must be in the first card
card = header.cards[0]
# The check that 'GROUPS' is missing is a bit redundant, since the
# match_header for GroupsHDU will always be called before this one.
if card.keyword == 'SIMPLE':
if 'GROUPS' not in header and card.value is False:
return True
else:
raise InvalidHDUException
else:
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _writedata(self, fileobj):
"""
Differs from the base class's ``_writedata()`` in that it doesn't
automatically add padding, and treats the data as a string of raw bytes
instead of an array.
"""
offset = 0
size = 0
if not fileobj.simulateonly:
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self.data is not None:
if not fileobj.simulateonly:
fileobj.write(self.data)
# flush, to make sure the content is written
fileobj.flush()
size = len(self.data)
# return both the location and the size of the data area
return offset, size
def _summary(self):
return (self.name, self.ver, 'NonstandardHDU', len(self._header))
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
class _ValidHDU(_BaseHDU, _Verify):
"""
Base class for all HDUs which are not corrupted.
"""
def __init__(self, data=None, header=None, name=None, ver=None, **kwargs):
super().__init__(data=data, header=header)
if (header is not None and
not isinstance(header, (Header, _BasicHeader))):
# TODO: Instead maybe try initializing a new Header object from
# whatever is passed in as the header--there are various types
# of objects that could work for this...
raise ValueError('header must be a Header object')
# NOTE: private data members _checksum and _datasum are used by the
# utility script "fitscheck" to detect missing checksums.
self._checksum = None
self._checksum_valid = None
self._datasum = None
self._datasum_valid = None
if name is not None:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
Matches any HDU that is not recognized as having either the SIMPLE or
XTENSION keyword in its header's first card, but is nonetheless not
corrupted.
TODO: Maybe it would make more sense to use _NonstandardHDU in this
case? Not sure...
"""
return first(header.keys()) not in ('SIMPLE', 'XTENSION')
@property
def size(self):
"""
Size (in bytes) of the data portion of the HDU.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
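# Worked example of the formula above (illustrative): a 100 x 100 float32
# image has BITPIX = -32, GCOUNT = 1, PCOUNT = 0, so
# size = 32 * 1 * (0 + 100 * 100) // 8 = 40000 bytes.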
def filebytes(self):
"""
Calculates and returns the number of bytes that this HDU will write to
a file.
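Examples
--------
Illustrative sketch; ``hdu`` stands for any HDU instance::

    total_bytes = hdu.filebytes()   # header + data, including padding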
"""
f = _File()
# TODO: Fix this once new HDU writing API is settled on
return self._writeheader(f)[1] + self._writedata(f)[1]
def fileinfo(self):
"""
Returns a dictionary detailing information about the locations
of this HDU within any associated file. The values are only
valid after a read or write of the associated file with no
intervening changes to the `HDUList`.
Returns
-------
dict or None
The dictionary details information about the locations of
this HDU within an associated file. Returns `None` when
the HDU is not associated with a file.
Dictionary contents:
========== ================================================
Key Value
========== ================================================
file File object associated with the HDU
filemode Mode in which the file was opened (readonly, copyonwrite,
update, append, ostream)
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ================================================
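Examples
--------
A minimal sketch, assuming ``hdul`` was returned by ``fits.open`` on an
existing file (otherwise ``fileinfo`` returns `None`)::

    info = hdul[0].fileinfo()
    if info is not None:
        print(info['hdrLoc'], info['datLoc'], info['datSpan'])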
"""
if hasattr(self, '_file') and self._file:
return {'file': self._file, 'filemode': self._file.mode,
'hdrLoc': self._header_offset, 'datLoc': self._data_offset,
'datSpan': self._data_size}
else:
return None
def copy(self):
"""
Make a copy of the HDU, both header and data are copied.
"""
if self.data is not None:
data = self.data.copy()
else:
data = None
return self.__class__(data=data, header=self._header.copy())
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
is_valid = BITPIX2DTYPE.__contains__
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes, so
# the checking is in order, in case required cards are in the wrong order.
if isinstance(self, ExtensionHDU):
firstkey = 'XTENSION'
firstval = self._extension
else:
firstkey = 'SIMPLE'
firstval = True
self.req_cards(firstkey, 0, None, firstval, option, errs)
self.req_cards('BITPIX', 1, lambda v: (_is_int(v) and is_valid(v)), 8,
option, errs)
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and 0 <= v <= 999), 0,
option, errs)
naxis = self._header.get('NAXIS', 0)
if naxis < 1000:
for ax in range(3, naxis + 3):
key = 'NAXIS' + str(ax - 2)
self.req_cards(key, ax,
lambda v: (_is_int(v) and v >= 0),
_extract_number(self._header[key], default=1),
option, errs)
# Remove NAXISj cards where j is not in range 1, naxis inclusive.
for keyword in self._header:
if keyword.startswith('NAXIS') and len(keyword) > 5:
try:
number = int(keyword[5:])
if number <= 0 or number > naxis:
raise ValueError
except ValueError:
err_text = ("NAXISj keyword out of range ('{}' when "
"NAXIS == {})".format(keyword, naxis))
def fix(self=self, keyword=keyword):
del self._header[keyword]
errs.append(
self.run_option(option=option, err_text=err_text,
fix=fix, fix_text="Deleted."))
# Verify that the EXTNAME keyword exists and is a string
if 'EXTNAME' in self._header:
if not isinstance(self._header['EXTNAME'], str):
err_text = 'The EXTNAME keyword must have a string value.'
fix_text = 'Converted the EXTNAME keyword to a string value.'
def fix(header=self._header):
header['EXTNAME'] = str(header['EXTNAME'])
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
# TODO: Improve this API a little bit--for one, most of these arguments
# could be optional
def req_cards(self, keyword, pos, test, fix_value, option, errlist):
"""
Check the existence, location, and value of a required `Card`.
Parameters
----------
keyword : str
The keyword to validate
pos : int, callable
If an ``int``, this specifies the exact location this card should
have in the header. Remember that Python is zero-indexed, so this
means ``pos=0`` requires the card to be the first card in the
header. If given a callable, it should take one argument--the
actual position of the keyword--and return `True` or `False`. This
can be used for custom evaluation. For example if
``pos=lambda idx: idx > 10`` this will check that the keyword's
index is greater than 10.
test : callable
This should be a callable (generally a function) that is passed the
value of the given keyword and returns `True` or `False`. This can
be used to validate the value associated with the given keyword.
fix_value : str, int, float, complex, bool, None
A valid value for a FITS keyword to use if the given ``test``
fails to replace an invalid value. In other words, this provides
a default value to use as a replacement if the keyword's current
value is invalid. If `None`, there is no replacement value and the
keyword is unfixable.
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
errlist : list
A list of validation errors already found in the FITS file; this is
used primarily for the validation system to collect errors across
multiple HDUs and multiple calls to `req_cards`.
Notes
-----
If ``pos=None``, the card can be anywhere in the header. If the card
does not exist, the new card will have the ``fix_value`` as its value
when created. The card's value is also checked using the ``test``
argument.
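Examples
--------
A hedged sketch mirroring how ``_verify`` above calls this method for the
``BITPIX`` card::

    errs = _ErrList([], unit='Card')
    self.req_cards('BITPIX', 1,
                   lambda v: _is_int(v) and v in BITPIX2DTYPE,
                   8, 'warn', errs)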
"""
errs = errlist
fix = None
try:
index = self._header.index(keyword)
except ValueError:
index = None
fixable = fix_value is not None
insert_pos = len(self._header) + 1
# If pos is an int, insert at the given position (and convert it to a
# lambda)
if _is_int(pos):
insert_pos = pos
pos = lambda x: x == insert_pos
# if the card does not exist
if index is None:
err_text = "'{}' card does not exist.".format(keyword)
fix_text = "Fixed by inserting a new '{}' card.".format(keyword)
if fixable:
# use repr to accommodate both string and non-string types
# Boolean is also OK in this constructor
card = (keyword, fix_value)
def fix(self=self, insert_pos=insert_pos, card=card):
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
else:
# if the supposed location is specified
if pos is not None:
if not pos(index):
err_text = ("'{}' card at the wrong place "
"(card {}).".format(keyword, index))
fix_text = ("Fixed by moving it to the right place "
"(card {}).".format(insert_pos))
def fix(self=self, index=index, insert_pos=insert_pos):
card = self._header.cards[index]
del self._header[index]
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# if value checking is specified
if test:
val = self._header[keyword]
if not test(val):
err_text = ("'{}' card has invalid value '{}'.".format(
keyword, val))
fix_text = ("Fixed by setting a new value '{}'.".format(
fix_value))
if fixable:
def fix(self=self, keyword=keyword, val=fix_value):
self._header[keyword] = fix_value
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
return errs
def add_datasum(self, when=None, datasum_keyword='DATASUM'):
"""
Add the ``DATASUM`` card to this HDU with the value set to the
checksum calculated for the data.
Parameters
----------
when : str, optional
Comment string for the card that by default represents the
time when the checksum was calculated
datasum_keyword : str, optional
The name of the header keyword to store the datasum value in;
this is typically 'DATASUM' per convention, but there exist
use cases in which a different keyword should be used
Returns
-------
checksum : int
The calculated datasum
Notes
-----
For testing purposes, provide a ``when`` argument to enable the comment
value in the card to remain consistent. This will enable the
generation of a ``CHECKSUM`` card with a consistent value.
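Examples
--------
Illustrative only; ``hdu`` is any HDU instance and the ``when`` comment is
arbitrary::

    datasum = hdu.add_datasum(when='data unit checksum for regression test')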
"""
cs = self._calculate_datasum()
if when is None:
when = 'data unit checksum updated {}'.format(self._get_timestamp())
self._header[datasum_keyword] = (str(cs), when)
return cs
def add_checksum(self, when=None, override_datasum=False,
checksum_keyword='CHECKSUM', datasum_keyword='DATASUM'):
"""
Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with
the values set to the checksum calculated for the HDU and the
data respectively. The addition of the ``DATASUM`` card may
be overridden.
Parameters
----------
when : str, optional
comment string for the cards; by default the comments
will represent the time when the checksum was calculated
override_datasum : bool, optional
add the ``CHECKSUM`` card only
checksum_keyword : str, optional
The name of the header keyword to store the checksum value in; this
is typically 'CHECKSUM' per convention, but there exist use cases
in which a different keyword should be used
datasum_keyword : str, optional
See ``checksum_keyword``
Notes
-----
For testing purposes, first call `add_datasum` with a ``when``
argument, then call `add_checksum` with a ``when`` argument and
``override_datasum`` set to `True`. This will provide consistent
comments for both cards and enable the generation of a ``CHECKSUM``
card with a consistent value.
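Examples
--------
A sketch of the testing recipe from the Notes above (the ``when`` string is
arbitrary)::

    hdu.add_datasum(when='fixed comment')
    hdu.add_checksum(when='fixed comment', override_datasum=True)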
"""
if not override_datasum:
# Calculate and add the data checksum to the header.
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword)
else:
# Just calculate the data checksum
data_cs = self._calculate_datasum()
if when is None:
when = 'HDU checksum updated {}'.format(self._get_timestamp())
# Add the CHECKSUM card to the header with a value of all zeros.
if datasum_keyword in self._header:
self._header.set(checksum_keyword, '0' * 16, when,
before=datasum_keyword)
else:
self._header.set(checksum_keyword, '0' * 16, when)
csum = self._calculate_checksum(data_cs,
checksum_keyword=checksum_keyword)
self._header[checksum_keyword] = csum
def verify_datasum(self):
"""
Verify that the value in the ``DATASUM`` keyword matches the value
calculated for the ``DATASUM`` of the current HDU data.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``DATASUM`` keyword present
"""
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
if datasum == int(self._header['DATASUM']):
return 1
else:
# Failed
return 0
else:
return 2
def verify_checksum(self):
"""
Verify that the value in the ``CHECKSUM`` keyword matches the
value calculated for the current HDU CHECKSUM.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``CHECKSUM`` keyword present
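Examples
--------
Illustrative sketch; ``hdu`` is any HDU whose header may carry a
``CHECKSUM`` card::

    status = hdu.verify_checksum()   # 0 = failed, 1 = ok, 2 = no CHECKSUM card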
"""
if 'CHECKSUM' in self._header:
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
else:
datasum = 0
checksum = self._calculate_checksum(datasum)
if checksum == self._header['CHECKSUM']:
return 1
else:
# Failed
return 0
else:
return 2
def _verify_checksum_datasum(self):
"""
Verify the checksum/datasum values if the cards exist in the header.
Simply displays a warning if either the checksum or datasum doesn't match.
"""
if 'CHECKSUM' in self._header:
self._checksum = self._header['CHECKSUM']
self._checksum_valid = self.verify_checksum()
if not self._checksum_valid:
warnings.warn(
'Checksum verification failed for HDU {0}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
if 'DATASUM' in self._header:
self._datasum = self._header['DATASUM']
self._datasum_valid = self.verify_datasum()
if not self._datasum_valid:
warnings.warn(
'Datasum verification failed for HDU {0}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
def _get_timestamp(self):
"""
Return the current timestamp in ISO 8601 format, with microseconds
stripped off.
Ex.: 2007-05-30T19:05:11
"""
return datetime.datetime.now().isoformat()[:19]
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if not self._data_loaded:
# This is the case where the data has not been read from the file
# yet. We find the data in the file, read it, and calculate the
# datasum.
if self.size > 0:
raw_data = self._get_raw_data(self._data_size, 'ubyte',
self._data_offset)
return self._compute_checksum(raw_data)
else:
return 0
elif self.data is not None:
return self._compute_checksum(self.data.view('ubyte'))
else:
return 0
def _calculate_checksum(self, datasum, checksum_keyword='CHECKSUM'):
"""
Calculate the value of the ``CHECKSUM`` card in the HDU.
"""
old_checksum = self._header[checksum_keyword]
self._header[checksum_keyword] = '0' * 16
# Convert the header to bytes.
s = self._header.tostring().encode('utf8')
# Calculate the checksum of the Header and data.
cs = self._compute_checksum(np.frombuffer(s, dtype='ubyte'), datasum)
# Encode the checksum into a string.
s = self._char_encode(~cs)
# Return the header card value.
self._header[checksum_keyword] = old_checksum
return s
def _compute_checksum(self, data, sum32=0):
"""
Compute the ones-complement checksum of a sequence of bytes.
Parameters
----------
data
a memory region to checksum
sum32
incremental checksum value from another region
Returns
-------
ones complement checksum
"""
blocklen = 2880
sum32 = np.uint32(sum32)
for i in range(0, len(data), blocklen):
length = min(blocklen, len(data) - i)  # the last block may be shorter than 2880 bytes
sum32 = self._compute_hdu_checksum(data[i:i + length], sum32)
return sum32
def _compute_hdu_checksum(self, data, sum32=0):
"""
Translated from FITS Checksum Proposal by Seaman, Pence, and Rots.
Use uint32 literals as a hedge against type promotion to int64.
This code should only be called with blocks of 2880 bytes
Longer blocks result in non-standard checksums with carry overflow
Historically, this code *was* called with larger blocks and for that
reason still needs to accept them for backward compatibility.
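The final carry folding is the usual ones-complement trick; as a plain
Python sketch of the same idea (illustrative, not the vectorized code
below)::

    while (hi >> 16) or (lo >> 16):
        hi, lo = (hi & 0xFFFF) + (lo >> 16), (lo & 0xFFFF) + (hi >> 16)
    checksum = (hi << 16) + lo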
"""
u8 = np.uint32(8)
u16 = np.uint32(16)
uFFFF = np.uint32(0xFFFF)
if data.nbytes % 2:
last = data[-1]
data = data[:-1]
else:
last = np.uint32(0)
data = data.view('>u2')
hi = sum32 >> u16
lo = sum32 & uFFFF
hi += np.add.reduce(data[0::2], dtype=np.uint64)
lo += np.add.reduce(data[1::2], dtype=np.uint64)
if (data.nbytes // 2) % 2:
lo += last << u8
else:
hi += last << u8
hicarry = hi >> u16
locarry = lo >> u16
while hicarry or locarry:
hi = (hi & uFFFF) + locarry
lo = (lo & uFFFF) + hicarry
hicarry = hi >> u16
locarry = lo >> u16
return (hi << u16) + lo
# _MASK and _EXCLUDE used for encoding the checksum value into a character
# string.
_MASK = [0xFF000000,
0x00FF0000,
0x0000FF00,
0x000000FF]
_EXCLUDE = [0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60]
def _encode_byte(self, byte):
"""
Encode a single byte.
"""
quotient = byte // 4 + ord('0')
remainder = byte % 4
ch = np.array(
[(quotient + remainder), quotient, quotient, quotient],
dtype='int32')
check = True
while check:
check = False
for x in self._EXCLUDE:
for j in [0, 2]:
if ch[j] == x or ch[j + 1] == x:
ch[j] += 1
ch[j + 1] -= 1
check = True
return ch
def _char_encode(self, value):
"""
Encodes the checksum ``value`` using the algorithm described
in SPR section A.7.2 and returns it as a 16 character string.
Parameters
----------
value
a checksum
Returns
-------
ascii encoded checksum
"""
value = np.uint32(value)
asc = np.zeros((16,), dtype='byte')
ascii = np.zeros((16,), dtype='byte')
for i in range(4):
byte = (value & self._MASK[i]) >> ((3 - i) * 8)
ch = self._encode_byte(byte)
for j in range(4):
asc[4 * j + i] = ch[j]
for i in range(16):
ascii[i] = asc[(i + 15) % 16]
return decode_ascii(ascii.tostring())
class ExtensionHDU(_ValidHDU):
"""
An extension HDU class.
This class is the base class for the `TableHDU`, `ImageHDU`, and
`BinTableHDU` classes.
"""
_extension = ''
@classmethod
def match_header(cls, header):
"""
This class should never be instantiated directly. Either a standard
extension HDU type should be used for a specific extension, or
NonstandardExtHDU should be used.
"""
raise NotImplementedError
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
Works similarly to the normal writeto(), but prepends a default
`PrimaryHDU`, as required by extension HDUs (which cannot stand on
their own).
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
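Examples
--------
Illustrative sketch (the filename and data are arbitrary)::

    from astropy.io import fits
    import numpy as np

    hdu = fits.ImageHDU(data=np.zeros((2, 2)))
    hdu.writeto('extension_only.fits')   # a default PrimaryHDU is prepended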
"""
from .hdulist import HDUList
from .image import PrimaryHDU
hdulist = HDUList([PrimaryHDU(), self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
naxis = self._header.get('NAXIS', 0)
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v >= 0),
0, option, errs)
self.req_cards('GCOUNT', naxis + 4, lambda v: (_is_int(v) and v == 1),
1, option, errs)
return errs
# For backwards compatibility, though this needs to be deprecated
# TODO: Mark this as deprecated
_ExtensionHDU = ExtensionHDU
class NonstandardExtHDU(ExtensionHDU):
"""
A Non-standard Extension HDU class.
This class is used for an Extension HDU when the ``XTENSION``
`Card` has a non-standard value. In this case, Astropy can figure
out how big the data is but not what it is. The data for this HDU
is read from the file as a byte stream that begins at the first
byte after the header ``END`` card and continues until the
beginning of the next header or the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any extension HDU that is not one of the standard extension HDU
types.
"""
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
# A3DTABLE is not really considered a 'standard' extension, as it was
# sort of the prototype for BINTABLE; however, since our BINTABLE
# implementation handles A3DTABLE HDUs it is listed here.
standard_xtensions = ('IMAGE', 'TABLE', 'BINTABLE', 'A3DTABLE')
# The check that xtension is not one of the standard types should be
# redundant.
return (card.keyword == 'XTENSION' and
xtension not in standard_xtensions)
def _summary(self):
axes = tuple(self.data.shape)
return (self.name, self.ver, 'NonstandardExtHDU', len(self._header), axes)
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
# TODO: Mark this as deprecated
_NonstandardExtHDU = NonstandardExtHDU
2cb61d3e4eae1c7f7e4ebb19c2771426f217a189805fded12d47549cfd077b79
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.tests.helper import catch_warnings, ignore_warnings
from astropy.utils.compat import NUMPY_LT_1_14_1, NUMPY_LT_1_14_2
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.io.fits.column import Delayed, NUMPY2FITS
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent.
Consistency is determined by ensuring the difference is less than the
expected amount. Returns True if consistent, False if there are any differences.
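A quick illustrative check (values chosen only for this example, and within
the relaxed tolerance currently set in the function body):

>>> import numpy as np
>>> comparefloats(np.array([1.0, 2.0]), np.array([1.0, 2.000001]))
True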
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
Compare two record arrays.
Compares field by field, using approximate comparison for float columns
(complex values are not yet handled).
Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print('field {0} type differs'.format(i))
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print('field {0} differs'.format(i))
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print('fielda[{0}]: {1}'.format(row, fielda[row]))
print('fieldb[{0}]: {1}'.format(row, fieldb[row]))
print('field {0} differs in row {1}'.format(i, row))
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print('field {0} differs'.format(i))
return False
return True
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize a new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
# Note that X format must be two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
xx = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, u'Serius', -1.45, u'A1V'),
(2, u'Canopys', -0.73, u'F0Ib'),
(3, u'Rigil Kent', -0.1, u'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Ensure I can change the value of one data element and it affects
# all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
with ignore_warnings():
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
with ignore_warnings():
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data('tb.fits')) as h:
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
@pytest.mark.xfail(not NUMPY_LT_1_14_1 and NUMPY_LT_1_14_2,
reason="See https://github.com/astropy/astropy/issues/7214")
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
# numpy >= 1.12 changes how structured arrays are printed, so we
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
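# Note: with nrows=5 the new table is allocated with five rows whose fields
# start out as default values (blank strings, zeros, False); individual rows
# are then assigned below using a FITS_record, a tuple, and a list.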
# Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
# Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
# Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # nopep8
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # nopep8
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # nopep8
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # nopep8
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # nopep8
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
# Ensure I can change the value of one data element and it affects
# all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any 'EXTNAME' value already present in a supplied header.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
# Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name=u'spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
t.close()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
# The zero-width ORBPARM column should still show up in the data, and
# the rest of the data should be readable despite it
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tostring().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tostring().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
with ignore_warnings():
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tostring().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tostring().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
# Modify the TDIM fields to my own specification
thdu.header['TDIM1'] = '(2,3)'
thdu.header['TDIM2'] = '(4,2)'
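# TDIMn lists the axis lengths with the fastest-varying axis first (Fortran
# order), and for character columns the first axis is the string width.  So
# TDIM1 = '(2,3)' reads back with per-row shape (3, 2), and TDIM2 = '(4,2)'
# as two 4-character strings per row, as the shape checks below confirm.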
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
with ignore_warnings():
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
with ignore_warnings():
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
with catch_warnings() as w:
tbhdu1 = fits.BinTableHDU(data=arr)
assert len(w) == 0
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
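# The expected TDIM1 below follows the same reversed-axis convention:
# a (3, 2) array of 4-character strings per row is recorded as '(4,2,3)'.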
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works when the product is less than the repeat count
(treating the trailing bytes as unspecified fill values per the FITS
standard) and fails if the dimensions specified by TDIMn imply more
elements than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
assert h[1].data.itemsize == 48  # 24 16-bit integers = 48 bytes per row
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data('tdim.fits')) as hdulist:
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data('table.fits'), mode='readonly') as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_dump_load_round_trip(self):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
# Double check that the headers are equivalent
assert str(tbhdu.header) == str(new_tbhdu.header)
hdul.close()
def test_dump_load_array_colums(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
# Format X (bit array) is currently not supported by the dump file format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = u' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
nulled = h.read().replace(u'2 ', u' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
nulled = h.read().replace(u'3.00000000', u' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unncessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
direclty from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unecessary processing of the data.
"""
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
with ignore_warnings():
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
with ignore_warnings():
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif(str('not HAVE_OBJGRAPH'))
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif(str('not HAVE_OBJGRAPH'))
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_clobber_vs_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
tbhdu.dump(datafile, cdfile, hfile, clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert len(objgraph.by_type(type_)) <= refcount, \
"More {0!r} objects still in memory than before.".format(type_)
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
with ignore_warnings():
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
for code in ('PJ()', 'QJ()'):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
with ignore_warnings():
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
        Test to ensure that when Numpy-style record formats are passed in to
        the Column constructor for the format argument, they are recognized as
        long as they are unambiguous (a loose criterion, since Numpy is case
        insensitive when parsing format codes, but their "proper" case is
        lower-case, so we accept that).  In practice, any key in the
        NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
            c = fits.Column('TEST', np.dtype(recformat))
            assert c.format == fitsformat
            c = fits.Column('TEST', recformat)
            assert c.format == fitsformat
            c = fits.Column('TEST', fitsformat)
            assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
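    # Summary of the behaviour exercised above (comment added for clarity):
    # codes such as 'I', 'E' and 'D' are valid for both binary and ASCII
    # tables, so they default to the binary interpretation unless ascii=True
    # is passed, e.g. (illustrative only):
    #
    #     fits.Column('TEST', 'E').format              # 'E'     (binary, f4)
    #     fits.Column('TEST', 'E', ascii=True).format  # 'E15.7' (ASCII)
    #
    # whereas 'F' exists only in ASCII tables and is interpreted that way
    # unconditionally.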
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
assert raw.tostring() == b' 1. 2. 3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
    def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs['A'].bzero
assert 2**15 == col_defs['B'].bzero
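    # Note (added for clarity): FITS has no native unsigned integer types, so
    # unsigned columns are stored as signed integers plus an offset; that is
    # why the '<u4' field above gets bzero = 2**31 and the '>u2' field gets
    # bzero = 2**15 when the ColDefs is built from a record array.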
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match='Field 2 has a repeat count '
'of 0 in its format code'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
c = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
c = fits.Column('col', format='I', null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_ref_point='1', coord_ref_value='1',
coord_inc='1', time_ref_pos=1)
err_msgs = ['keyword arguments to Column were invalid', 'TNULL', 'TDISP',
'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
c = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
c = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
c = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from astropy.table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with catch_warnings() as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
assert str(w[0].message).startswith("'not-a-unit' did not parse as"
" fits unit")
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith("The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
    # cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with pytest.warns(None) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
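# Note added for clarity (not part of the original test): the column
# attributes checked above correspond to header keywords roughly as follows:
# coord_type -> TCTYPn, coord_unit -> TCUNIn, coord_ref_point -> TCRPXn,
# coord_ref_value -> TCRVLn, coord_inc -> TCDLTn and time_ref_pos -> TRPOSn.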
def test_empty_table(tmpdir):
ofile = str(tmpdir.join('emptytable.fits'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
ofile = str(tmpdir.join('emptytable.fits.gz'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
|
298e95b40d6e09dc75328e24c4f38d1b41ae060908726ee29293fb7676228b55 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import glob
import io
import os
import platform
import sys
import copy
import subprocess
import pytest
import numpy as np
from astropy.io.fits.verify import VerifyError
from astropy.io import fits
from astropy.tests.helper import raises, catch_warnings, ignore_warnings
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
hdul[4].name = 'Jim'
hdul[4].ver = 9
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
def test_hdulist_file_info(self):
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul.fileinfo(0)
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
@raises(ValueError)
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
@raises(ValueError)
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
        that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
with fits.open(self.data('tb.fits')) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
with fits.open(self.data('tb.fits')) as hdul:
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
        Tests the use of a file-like object with no tell or seek methods
        in HDUList.writeto(), HDUList.flush() or astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
with fits.open(self.data('test0.fits')) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
hdul = fits.open(self.data('test0.fits'))
hdul[0].header['FOO'] = 'BAR'
with catch_warnings(AstropyUserWarning) as w:
hdul.flush()
assert len(w) == 1
assert 'mode is not supported' in str(w[0].message)
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data('arange.fits'))
# Malform NAXISj header data
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, '10.0', '7']
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
"""
Tests that wellformed NAXISj values are not modified.
"""
hdu = fits.open(self.data('arange.fits'))
# Fake new NAXISj header data
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify('silentfix')
        # Check that the well-formed data was not modified
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp('temp.fits'), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
with fits.open(self.data('test0.fits'),
do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(b'\0' * 2880)
with ignore_warnings():
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write('\0'.encode('ascii') * padding_len)
with catch_warnings(AstropyUserWarning) as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == a).all()
assert ('contains null bytes instead of spaces' in
str(w[0].message))
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header['TEST{}'.format(idx)] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
@pytest.mark.xfail(platform.system() == 'Windows',
reason='https://github.com/astropy/astropy/issues/5797')
def test_update_resized_header(self):
"""
Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger than
before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header['TEST{}'.format(idx)] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST{}'.format(idx)] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
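    # Note (added for clarity): FITS headers are written in 2880-byte blocks,
    # so shrinking or growing a header across a block boundary forces the data
    # that follows it to be shifted, which is the resize behaviour this test
    # exercises.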
def test_update_resized_header2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST{}'.format(idx)] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
@ignore_warnings()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
                        assert hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in glob.glob(os.path.join(self.data_dir, '*.fits')):
if sys.platform == 'win32' and filename == 'zerowidth.fits':
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith('variable_length_table.fits'):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
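    # Illustrative sketch (added for clarity, not part of the original test):
    # HDUList.fromstring parses an in-memory byte string laid out exactly like
    # a FITS file, e.g.
    #
    #     with open('some.fits', 'rb') as f:      # hypothetical file name
    #         hdul = fits.HDUList.fromstring(f.read())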
def test_save_backup(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file('scale.fits')
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header['TEST'] = 'TEST'
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# One more time to see if multiple backups are made
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[0].data = hdul_b[0].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert np.all(hdul_a[0].data == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[1].data = hdul_b[1].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
def test_overwrite_vs_clobber(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert not (key in hdulist)
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
f.close()
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
import codecs
filename = self.temp('not-fits-with-unicode.fits')
        with codecs.open(filename, mode='w', encoding='utf-8') as f:
f.write(u'Ce\xe7i ne marche pas')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError):
with pytest.warns(AstropyUserWarning, match='non-ASCII characters '
'are present in the FITS file header'):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
# To avoid creating the file multiple times the tests are
# all included in one test file. See the discussion to the
# PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised, but a check is still included.
#
with catch_warnings(ResourceWarning) as ws:
# Make sure that files opened by the user are not closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=True)
assert len(ws) == 0
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
with fits.open(filename) as hdul:
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
def test_write_hdulist_to_stream(self):
"""
Unit test for https://github.com/astropy/astropy/issues/7435
Ensure that an HDUList can be written to a stream in Python 2
"""
        data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp('test.fits'), 'wb') as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=fout) as p:
hdulist.writeto(p.stdin)
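    # Note (added for clarity): writing through the subprocess pipe exercises
    # the code path for non-seekable streams, since p.stdin supports neither
    # seek() nor tell().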
|
ade955b82baa257f95f83a28bd1db3e153d73b86ba102c3123706a589a7e1dc5 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import warnings
import pytest
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.io.fits import printdiff
from astropy.tests.helper import catch_warnings
from . import FitsTestCase
class TestConvenience(FitsTestCase):
def test_resource_warning(self):
warnings.simplefilter('always', ResourceWarning)
with catch_warnings() as w:
data = fits.getdata(self.data('test0.fits'))
assert len(w) == 0
with catch_warnings() as w:
header = fits.getheader(self.data('test0.fits'))
assert len(w) == 0
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data('test0.fits'), 'rb')
data = fits.getdata(f)
assert not f.closed
f.seek(0)
header = fits.getheader(f)
assert not f.closed
f.close() # Close it now
def test_table_to_hdu(self):
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
with catch_warnings() as w:
hdu = fits.table_to_hdu(table)
assert len(w) == 1
assert str(w[0].message).startswith("'not-a-unit' did not parse as"
" fits unit")
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp('test_table_to_hdu.fits')
hdu.writeto(filename, overwrite=True)
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table.meta['comments'] = ['This', 'is', 'a', 'comment']
hdu = fits.table_to_hdu(table)
assert hdu.header.get('comment') == ['This', 'is', 'a', 'comment']
with pytest.raises(ValueError):
hdu.header.index('comments')
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5, ), dtype=[('x', float), ('y', int)])
h_in = fits.Header()
h_in['ANSWER'] = (42.0, 'LTU&E')
filename = self.temp('tabhdr42.fits')
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out['ANSWER'] == 42
def test_image_extension_update_header(self):
"""
Test that _makehdu correctly includes the header. For example in the
fits.update convenience function.
"""
filename = self.temp('twoextension.fits')
hdus = [fits.PrimaryHDU(np.zeros((10, 10))),
fits.ImageHDU(np.zeros((10, 10)))]
fits.HDUList(hdus).writeto(filename)
fits.update(filename,
np.zeros((10, 10)),
header=fits.Header([('WHAT', 100)]),
ext=1)
h_out = fits.getheader(filename, ext=1)
assert h_out['WHAT'] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data('arange.fits'),
self.data('blank.fits')) is None
assert printdiff(self.data('arange.fits'),
self.data('blank.fits'), ext=0) is None
assert printdiff(self.data('o4sp040b0_raw.fits'),
self.data('o4sp040b0_raw.fits'),
extname='sci') is None
        # This may seem weird, but check printdiff's implementation to see
        # why: we need to test an incorrect second file
with pytest.raises(OSError):
printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')
# Test HDU object inputs
with fits.open(self.data('stddata.fits'), mode='readonly') as in1:
with fits.open(self.data('checksum.fits'), mode='readonly') as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6937
"""
# copy fits file to the temp directory
self.copy_file('tb.fits')
# test without datafile
fits.tabledump(self.temp('tb.fits'))
assert os.path.isfile(self.temp('tb_1.txt'))
# test with datafile
fits.tabledump(self.temp('tb.fits'), datafile=self.temp('test_tb.txt'))
assert os.path.isfile(self.temp('test_tb.txt'))
def test_append_filename(self):
"""
Test fits.append with a filename argument.
"""
data = np.arange(6)
testfile = self.temp('test_append_1.fits')
# Test case 1: creation of file
fits.append(testfile, data=data, checksum=True)
# Test case 2: append to existing file, with verify=True
# Also test that additional keyword can be passed to fitsopen
fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)
# Test case 3: append to existing file, with verify=False
fits.append(testfile, data=data * 3, checksum=True, verify=False)
with fits.open(testfile, checksum=True) as hdu1:
np.testing.assert_array_equal(hdu1[0].data, data)
np.testing.assert_array_equal(hdu1[1].data, data * 2)
np.testing.assert_array_equal(hdu1[2].data, data * 3)
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_append_filehandle(self, tmpdir, mode):
"""
Test fits.append with a file handle argument.
"""
append_file = tmpdir.join('append.fits')
with append_file.open(mode) as handle:
fits.append(filename=handle, data=np.ones((4, 4)))
def test_append_with_header(self):
"""
Test fits.append with a fits Header, which triggers detection of the
HDU class. Regression test for
https://github.com/astropy/astropy/issues/8660
"""
testfile = self.temp('test_append_1.fits')
with fits.open(self.data('test0.fits')) as hdus:
for hdu in hdus:
fits.append(testfile, hdu.data, hdu.header, checksum=True)
with fits.open(testfile, checksum=True) as hdus:
assert len(hdus) == 5
|
9dc8a4ca846099518052fe3a438cf3ca709c6e933c874105e65f48efce90a3c4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.io.fits.column import Column
from astropy.io.fits.diff import (FITSDiff, HeaderDiff, ImageDataDiff, TableDataDiff,
HDUDiff)
from astropy.io.fits.hdu import HDUList, PrimaryHDU, ImageHDU
from astropy.io.fits.hdu.table import BinTableHDU
from astropy.io.fits.header import Header
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.io import fits
from . import FitsTestCase
class TestDiff(FitsTestCase):
def test_identical_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha.tostring(), hb.tostring()).identical
with pytest.raises(TypeError):
HeaderDiff(1, 2)
def test_slightly_different_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
assert not HeaderDiff(ha, hb).identical
def test_common_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
assert HeaderDiff(ha, hb).common_keywords == ['A', 'B', 'C']
def test_different_keyword_count(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
del hb['B']
diff = HeaderDiff(ha, hb)
assert not diff.identical
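# diff_keyword_count is the total number of keywords in each header, as (a, b)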
assert diff.diff_keyword_count == (3, 2)
# But make sure the common keywords are at least correct
assert diff.common_keywords == ['A', 'C']
def test_different_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
ha['E'] = (6, 'Comment')
ha['F'] = (7, 'Comment')
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keywords == (['E', 'F'], ['D'])
def test_different_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 4)]}
def test_different_keyword_comments(self):
ha = Header([('A', 1), ('B', 2), ('C', 3, 'comment 1')])
hb = ha.copy()
hb.comments['C'] = 'comment 2'
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_comments ==
{'C': [('comment 1', 'comment 2')]})
def test_different_keyword_values_with_duplicate(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('C', 4))
hb.append(('C', 5))
diff = HeaderDiff(ha, hb)
assert not diff.identical
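# Duplicated keywords are diffed per occurrence; None marks occurrences that agree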
assert diff.diff_keyword_values == {'C': [None, (4, 5)]}
def test_asymmetric_duplicate_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('A', 2, 'comment 1'))
ha.append(('A', 3, 'comment 2'))
hb.append(('B', 4, 'comment 3'))
hb.append(('C', 5, 'comment 4'))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {}
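# diff_duplicate_keywords maps each keyword to its number of occurrences as (a, b)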
assert (diff.diff_duplicate_keywords ==
{'A': (3, 1), 'B': (1, 2), 'C': (1, 2)})
report = diff.report()
assert ("Inconsistent duplicates of keyword 'A' :\n"
" Occurs 3 time(s) in a, 1 times in (b)") in report
def test_floating_point_rtol(self):
ha = Header([('A', 1), ('B', 2.00001), ('C', 3.000001)])
hb = ha.copy()
hb['B'] = 2.00002
hb['C'] = 3.000002
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(2.00001, 2.00002)], 'C': [(3.000001, 3.000002)]})
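# rtol=1e-6 absorbs C's ~3e-7 relative difference but not B's ~5e-6 difference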
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {'B': [(2.00001, 2.00002)]}
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert diff.identical
def test_floating_point_atol(self):
ha = Header([('A', 1), ('B', 1.0), ('C', 0.0)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.000001
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)], 'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'C': [(0.0, 0.000001)]})
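# atol puts an absolute floor on reportable differences, independent of magnitude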
diff = HeaderDiff(ha, hb, atol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, atol=1.1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6)
assert not diff.identical
def test_deprecation_tolerance(self):
"""Verify uses of tolerance and rtol.
This test should be removed in the next astropy version."""
ha = Header([('B', 1.0), ('C', 0.1)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.100001
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
diff = HeaderDiff(ha, hb, tolerance=1e-6)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"tolerance" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "rtol" instead.')
assert (diff.diff_keyword_values == {'C': [(0.1, 0.100001)],
'B': [(1.0, 1.00001)]})
assert not diff.identical
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
# `rtol` is always ignored when `tolerance` is provided
diff = HeaderDiff(ha, hb, rtol=1e-6, tolerance=1e-5)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"tolerance" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "rtol" instead.')
assert diff.identical
def test_ignore_blanks(self):
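# Temporarily disable whitespace stripping so the trailing blank survives into the card value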
with fits.conf.set_temp('strip_header_whitespace', False):
ha = Header([('A', 1), ('B', 2), ('C', 'A ')])
hb = ha.copy()
hb['C'] = 'A'
assert ha['C'] != hb['C']
diff = HeaderDiff(ha, hb)
# Trailing blanks are ignored by default
assert diff.identical
assert diff.diff_keyword_values == {}
# Don't ignore blanks
diff = HeaderDiff(ha, hb, ignore_blanks=False)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [('A ', 'A')]}
def test_ignore_blank_cards(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152
Ignore blank cards.
"""
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = Header([('A', 1), ('', ''), ('B', 2), ('', ''), ('C', 3)])
hc = ha.copy()
hc.append()
hc.append()
# We now have a header with interleaved blanks and a header with trailing
# blanks, both of which should compare as identical to ha when blank cards
# are ignored
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha, hc).identical
assert HeaderDiff(hb, hc).identical
assert not HeaderDiff(ha, hb, ignore_blank_cards=False).identical
assert not HeaderDiff(ha, hc, ignore_blank_cards=False).identical
# Both hb and hc have the same number of blank cards; since order is
# currently ignored, these should still be identical even if blank
# cards are not ignored
assert HeaderDiff(hb, hc, ignore_blank_cards=False).identical
hc.append()
# But now there are different numbers of blanks, so they should not be
# ignored:
assert not HeaderDiff(hb, hc, ignore_blank_cards=False).identical
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy()
ha = Header([('A', 1), ('B', 2), ('C', 3)])
xa = np.array([(1.0, 1), (3.0, 4)], dtype=[('x', float), ('y', int)])
xb = np.array([(1.0, 2), (3.0, 5)], dtype=[('x', float), ('y', int)])
phdu = PrimaryHDU(header=ha)
ihdua = ImageHDU(data=a, name='SCI')
ihdub = ImageHDU(data=b, name='SCI')
bhdu1 = BinTableHDU(data=xa, name='ASDF')
bhdu2 = BinTableHDU(data=xb, name='ASDF')
hdula = HDUList([phdu, ihdua, bhdu1])
hdulb = HDUList([phdu, ihdub, bhdu2])
# ASDF extension should be different
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
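# diff_hdus holds (HDU index, HDUDiff) pairs; index 2 is the ASDF extension here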
assert diff.diff_hdus[0][0] == 2
# ASDF extension should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF'])
assert diff.identical, diff.report()
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASD*'])
assert diff.identical, diff.report()
# SCI extension should be different
hdulb['SCI'].data += 1
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF'])
assert not diff.identical
# SCI and ASDF extensions should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF'])
assert diff.identical, diff.report()
# All EXTVER of SCI should be ignored
ihduc = ImageHDU(data=a, name='SCI', ver=2)
hdulb.append(ihduc)
diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF'])
assert not any(diff.diff_hdus), diff.report()
assert any(diff.diff_hdu_count), diff.report()
def test_ignore_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['B'] = 4
hb['C'] = 5
diff = HeaderDiff(ha, hb, ignore_keywords=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_keywords=['B'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
report = diff.report()
assert 'Keyword B has different values' not in report
assert 'Keyword C has different values' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_keywords=['b'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
def test_ignore_keyword_comments(self):
ha = Header([('A', 1, 'A'), ('B', 2, 'B'), ('C', 3, 'C')])
hb = ha.copy()
hb.comments['B'] = 'D'
hb.comments['C'] = 'E'
diff = HeaderDiff(ha, hb, ignore_comments=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_comments=['B'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
report = diff.report()
assert 'Keyword B has different comments' not in report
assert 'Keyword C has different comments' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_comments=['b'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
def test_trivial_identical_images(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
diff = ImageDataDiff(ia, ib)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_relative_tolerance(self):
ia = np.ones((10, 10)) - 0.00001
ib = np.ones((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_absolute_tolerance(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert not diff.identical
assert diff.diff_total == 100
diff = ImageDataDiff(ia, ib, atol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5)
assert diff.identical
assert diff.diff_total == 0
def test_not_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6)
assert not diff.identical
assert diff.diff_total == 100
def test_identical_comp_image_hdus(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189
For this test we mostly just care that comparing two compressed images
does not crash, and returns the correct results. Two compressed images
will be considered identical if the decompressed data is the same.
Obviously we test whether or not the same compression was used by
looking for (or ignoring) header differences.
"""
data = np.arange(100.0).reshape(10, 10)
hdu = fits.CompImageHDU(data=data)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdula, \
fits.open(self.temp('test.fits')) as hdulb:
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_different_dimensions(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100) - 1
# Although ib could be reshaped into the same dimensions, for now the
# data is not compared anyway
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ((10, 10), (100,))
assert diff.diff_total == 0
report = diff.report()
assert 'Data dimensions differ' in report
assert 'a: 10 x 10' in report
assert 'b: 100' in report
assert 'No further data comparison performed.' in report
def test_different_pixels(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
ib[0, 0] = 10
ib[5, 5] = 20
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ()
assert diff.diff_total == 2
assert diff.diff_ratio == 0.02
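# Each diff_pixels entry is (pixel index, (value in a, value in b))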
assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))]
def test_identical_tables(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('D', format='J', bscale=2.0, array=[0, 1])
c5 = Column('E', format='A3', array=['abc', 'def'])
c6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
c7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
c8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
c9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
c10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10]
ta = BinTableHDU.from_columns(columns)
tb = BinTableHDU.from_columns([c.copy() for c in columns])
diff = TableDataDiff(ta.data, tb.data)
assert diff.identical
assert len(diff.common_columns) == 10
assert diff.common_column_names == set('abcdefghij')
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_diff_empty_tables(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178
Ensure that diffing tables containing empty data doesn't crash.
"""
c1 = Column('D', format='J')
c2 = Column('E', format='J')
thdu = BinTableHDU.from_columns([c1, c2], nrows=0)
hdula = fits.HDUList([thdu])
hdulb = fits.HDUList([thdu])
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_ignore_table_fields(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('B', format='X', array=[[1], [0]])
c5 = Column('C', format='4I', dim='(2, 2)',
array=[[1, 2, 3, 4], [5, 6, 7, 8]])
ta = BinTableHDU.from_columns([c1, c2, c3])
tb = BinTableHDU.from_columns([c1, c4, c5])
diff = TableDataDiff(ta.data, tb.data, ignore_fields=['B', 'C'])
assert diff.identical
# The only common column should be c1
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'a'}
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_different_table_field_names(self):
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([ca, cb])
tb = BinTableHDU.from_columns([ca, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'a'}
assert diff.diff_column_names == (['B'], ['C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert 'Extra column B of format L in a' in report
assert 'Extra column C of format L in b' in report
def test_different_table_field_counts(self):
"""
Test tables with some common columns, but different number of columns
overall.
"""
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([cb])
tb = BinTableHDU.from_columns([ca, cb, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == (1, 3)
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'b'}
assert diff.diff_column_names == ([], ['A', 'C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert ' Tables have different number of columns:' in report
assert ' a: 1\n b: 3' in report
def test_different_table_rows(self):
"""
Test tables that are otherwise identical but one has more rows than the
other.
"""
ca1 = Column('A', format='L', array=[True, False])
cb1 = Column('B', format='L', array=[True, False])
ca2 = Column('A', format='L', array=[True, False, True])
cb2 = Column('B', format='L', array=[True, False, True])
ta = BinTableHDU.from_columns([ca1, cb1])
tb = BinTableHDU.from_columns([ca2, cb2])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == ()
assert len(diff.common_columns) == 2
assert diff.diff_rows == (2, 3)
assert diff.diff_values == []
report = diff.report()
assert 'Table rows differ' in report
assert 'a: 2' in report
assert 'b: 3' in report
assert 'No further data comparison performed.' in report
def test_different_table_data(self):
"""
Test diffing table data on columns of several different data formats
and dimensions.
"""
ca1 = Column('A', format='L', array=[True, False])
ca2 = Column('B', format='X', array=[[0], [1]])
ca3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
ca4 = Column('D', format='J', bscale=2.0, array=[0.0, 2.0])
ca5 = Column('E', format='A3', array=['abc', 'def'])
ca6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
ca7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
ca8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
ca9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
ca10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
cb1 = Column('A', format='L', array=[False, False])
cb2 = Column('B', format='X', array=[[0], [0]])
cb3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [5, 6, 7, 8]])
cb4 = Column('D', format='J', bscale=2.0, array=[2.0, 2.0])
cb5 = Column('E', format='A3', array=['abc', 'ghi'])
cb6 = Column('F', format='E', unit='m', array=[1.0, 2.0])
cb7 = Column('G', format='D', bzero=-0.1, array=[2.0, 3.0])
cb8 = Column('H', format='C', array=[1.0+1.0j, 2.0+3.0j])
cb9 = Column('I', format='M', array=[5.0+5.0j, 6.0+7.0j])
cb10 = Column('J', format='PI(2)', array=[[1, 2], [3, 4]])
ta = BinTableHDU.from_columns([ca1, ca2, ca3, ca4, ca5, ca6, ca7,
ca8, ca9, ca10])
tb = BinTableHDU.from_columns([cb1, cb2, cb3, cb4, cb5, cb6, cb7,
cb8, cb9, cb10])
diff = TableDataDiff(ta.data, tb.data, numdiffs=20)
assert not diff.identical
# The column definitions are the same, but not the column values
assert diff.diff_columns == ()
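# Each diff_values entry is ((column name, row index), (value in a, value in b))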
assert diff.diff_values[0] == (('A', 0), (True, False))
assert diff.diff_values[1] == (('B', 1), ([1], [0]))
assert diff.diff_values[2][0] == ('C', 1)
assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all()
assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all()
assert diff.diff_values[3] == (('D', 0), (0, 2.0))
assert diff.diff_values[4] == (('E', 1), ('def', 'ghi'))
assert diff.diff_values[5] == (('F', 0), (0.0, 1.0))
assert diff.diff_values[6] == (('F', 1), (1.0, 2.0))
assert diff.diff_values[7] == (('G', 0), (0.0, 2.0))
assert diff.diff_values[8] == (('G', 1), (1.0, 3.0))
assert diff.diff_values[9] == (('H', 0), (0.0+1.0j, 1.0+1.0j))
assert diff.diff_values[10] == (('I', 0), (4.0+5.0j, 5.0+5.0j))
assert diff.diff_values[11][0] == ('J', 0)
assert (diff.diff_values[11][1][0] == [0, 1]).all()
assert (diff.diff_values[11][1][1] == [1, 2]).all()
assert diff.diff_values[12][0] == ('J', 1)
assert (diff.diff_values[12][1][0] == [2, 3]).all()
assert (diff.diff_values[12][1][1] == [3, 4]).all()
assert diff.diff_total == 13
assert diff.diff_ratio == 0.65
report = diff.report()
assert ('Column A data differs in row 0:\n'
' a> True\n'
' b> False') in report
assert ('...and at 1 more indices.\n'
' Column D data differs in row 0:') in report
assert ('13 different table data element(s) found (65.00% different)'
in report)
assert report.count('more indices') == 1
def test_identical_files_basic(self):
"""Test identicality of two simple, extensionless files."""
a = np.arange(100).reshape(10, 10)
hdu = PrimaryHDU(data=a)
hdu.writeto(self.temp('testa.fits'))
hdu.writeto(self.temp('testb.fits'))
diff = FITSDiff(self.temp('testa.fits'), self.temp('testb.fits'))
assert diff.identical
report = diff.report()
# Primary HDUs should contain no differences
assert 'Primary HDU' not in report
assert 'Extension HDU' not in report
assert 'No differences found.' in report
a = np.arange(10)
ehdu = ImageHDU(data=a)
diff = HDUDiff(ehdu, ehdu)
assert diff.identical
report = diff.report()
assert 'No differences found.' in report
def test_partially_identical_files1(self):
"""
Test files that have some identical HDUs but a different extension
count.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
hdula = HDUList([phdu, ehdu])
hdulb = HDUList([phdu, ehdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == (2, 3)
# diff_hdus should be empty, since the third extension in hdulb
# has nothing to compare against
assert diff.diff_hdus == []
report = diff.report()
assert 'Files contain different numbers of HDUs' in report
assert 'a: 2\n b: 3' in report
assert 'No differences found between common HDUs' in report
def test_partially_identical_files2(self):
"""
Test files that have some identical HDUs but one different HDU.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
ehdu2 = ImageHDU(data=(a + 1))
hdula = HDUList([phdu, ehdu, ehdu])
hdulb = HDUList([phdu, ehdu2, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == ()
assert len(diff.diff_hdus) == 1
assert diff.diff_hdus[0][0] == 1
hdudiff = diff.diff_hdus[0][1]
assert not hdudiff.identical
assert hdudiff.diff_extnames == ()
assert hdudiff.diff_extvers == ()
assert hdudiff.diff_extension_types == ()
assert hdudiff.diff_headers.identical
assert hdudiff.diff_data is not None
datadiff = hdudiff.diff_data
assert isinstance(datadiff, ImageDataDiff)
assert not datadiff.identical
assert datadiff.diff_dimensions == ()
assert (datadiff.diff_pixels ==
[((0, y), (y, y + 1)) for y in range(10)])
assert datadiff.diff_ratio == 1.0
assert datadiff.diff_total == 100
report = diff.report()
# Primary HDU and 2nd extension HDU should have no differences
assert 'Primary HDU' not in report
assert 'Extension HDU 2' not in report
assert 'Extension HDU 1' in report
assert 'Headers contain differences' not in report
assert 'Data contains differences' in report
for y in range(10):
assert 'Data differs at [{}, 1]'.format(y + 1) in report
assert '100 different pixels found (100.00% different).' in report
def test_partially_identical_files3(self):
"""
Test files that have some identical HDUs but a different extension
name.
"""
phdu = PrimaryHDU()
ehdu = ImageHDU(name='FOO')
hdula = HDUList([phdu, ehdu])
ehdu = BinTableHDU(name='BAR')
ehdu.header['EXTVER'] = 2
ehdu.header['EXTLEVEL'] = 3
hdulb = HDUList([phdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 1
hdu_diff = diff.diff_hdus[0][1]
assert hdu_diff.diff_extension_types == ('IMAGE', 'BINTABLE')
assert hdu_diff.diff_extnames == ('FOO', 'BAR')
assert hdu_diff.diff_extvers == (1, 2)
assert hdu_diff.diff_extlevels == (1, 3)
report = diff.report()
assert 'Extension types differ' in report
assert 'a: IMAGE\n b: BINTABLE' in report
assert 'Extension names differ' in report
assert 'a: FOO\n b: BAR' in report
assert 'Extension versions differ' in report
assert 'a: 1\n b: 2' in report
assert 'Extension levels differ' in report
assert 'a: 1\n b: 3' in report
def test_diff_nans(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204
"""
# First test some arrays that should be equivalent....
arr = np.empty((10, 10), dtype=np.float64)
arr[:5] = 1.0
arr[5:] = np.nan
arr2 = arr.copy()
table = np.rec.array([(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)],
names=['cola', 'colb']).view(fits.FITS_rec)
table2 = table.copy()
assert ImageDataDiff(arr, arr2).identical
assert TableDataDiff(table, table2).identical
# Now let's introduce some differences, where there are nans and where
# there are not nans
arr2[0][0] = 2.0
arr2[5][0] = 2.0
table2[0][0] = 2.0
table2[1][1] = 2.0
diff = ImageDataDiff(arr, arr2)
assert not diff.identical
assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0))
assert diff.diff_pixels[1][0] == (5, 0)
assert np.isnan(diff.diff_pixels[1][1][0])
assert diff.diff_pixels[1][1][1] == 2.0
diff = TableDataDiff(table, table2)
assert not diff.identical
assert diff.diff_values[0] == (('cola', 0), (1.0, 2.0))
assert diff.diff_values[1][0] == ('colb', 1)
assert np.isnan(diff.diff_values[1][1][0])
assert diff.diff_values[1][1][1] == 2.0
def test_file_output_from_path_string(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
with open(outpath) as fout:
assert fout.read() == report_as_string
def test_file_output_overwrite_safety(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with pytest.raises(OSError):
diffobj.report(fileobj=outpath)
def test_file_output_overwrite_success(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
diffobj.report(fileobj=outpath, overwrite=True)
with open(outpath) as fout:
assert fout.read() == report_as_string, (
"overwritten output file is not identical to report string")
def test_file_output_overwrite_vs_clobber(self):
"""Verify uses of clobber and overwrite."""
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
diffobj.report(fileobj=outpath, clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
|
b518198a09e0e5aa8f2cc034689b05bf39680292345db8b279713880d82ee3c9 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import warnings
import collections
from io import StringIO, BytesIO
import pytest
import numpy as np
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning
from astropy.tests.helper import catch_warnings, ignore_warnings
from astropy.utils.exceptions import AstropyUserWarning
from . import FitsTestCase
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([('a', 1), ('b', 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header['c'] = 100
assert 'c' not in copied_header
# and changing the copy should not change the original.
copied_header['a'] = 0
assert original_header['a'] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([('a', 10)])
new_header = fits.Header(original_header, copy=True)
original_header['a'] = 20
assert new_header['a'] == 10
new_header['a'] = 0
assert original_header['a'] == 20
def test_init_with_dict():
dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
# Create a list of tuples, each consisting of a letter and its index in the alphabet
list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
# Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
header.rename_keyword('A', 'B')
assert 'A' not in header
assert 'B' in header
assert header[0] == 'B'
assert header['B'] == 'B'
assert header.comments['B'] == 'C'
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert '' == c.keyword
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == 'ABC'
assert c.value == 'abc'
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card('abc', '<8 ch')
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card('nullstr', '')
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring('ABC = F')
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card('long_int', -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card('floatnum', -467374636747637647347374734737437.)
if (str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and
str(c) != _pad("FLOATNUM= -4.6737463674763E+032")):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card('abc',
(1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
with ignore_warnings():
c = fits.Card('abc', 9, 'abcde' * 20)
assert (str(c) ==
"ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab")
c = fits.Card('abc', 'a' * 68, 'abcdefg')
assert str(c) == "ABC = '{}'".format('a' * 68)
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})
pytest.raises(ValueError, fits.Card, 'key', [], 'comment')
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
with catch_warnings():
warnings.simplefilter('error')
pytest.raises(UserWarning, fits.Card, 'abcdefghi', 'long')
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with catch_warnings() as w:
c = fits.Card('abc+', 9)
assert len(w) == 1
assert c.image == _pad('HIERARCH abc+ = 9')
def test_add_commentary(self):
header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),
('HISTORY', 2), ('HISTORY', 3), ('', '', ''),
('', '', '')])
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header['HISTORY'] == [1, 2, 3, 4]
header.add_history(0, after='A')
assert len(header) == 6
assert header.cards[1].value == 0
assert header['HISTORY'] == [0, 1, 2, 3, 4]
header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),
('', '', ''), ('', '', '')])
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
# that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[''] == [1, 2, 3, '', '', 4]
header.add_blank(0, after='A')
assert len(header) == 8
assert header.cards[1].value == 0
assert header[''] == [0, 1, 2, 3, '', '', 4]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
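# update() accepts any object exposing keys() and item access returning (value[, comment])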
header = fits.Header()
header.update({'FOO': ('BAR', 'BAZ')})
header.update(FakeHeader([('A', 1), ('B', 2, 'comment')]))
assert set(header.keys()) == {'FOO', 'A', 'B'}
assert header.comments['B'] == 'comment'
# test that comments are preserved
tmphdr = fits.Header()
tmphdr['HELLO'] = (1, 'this is a comment')
header.update(tmphdr)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO'}
assert header.comments['HELLO'] == 'this is a comment'
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO', 'NAXIS1', 'NAXIS2'}
assert set(header.values()) == {'BAR', 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header.update({'FOO': ('BAR', 'BAZ')})
assert hdul[0].header['FOO'] == 'BAR'
assert hdul[0].header.comments['FOO'] == 'BAZ'
with pytest.raises(ValueError):
hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')})
hdul.writeto(self.temp('test.fits'))
hdul.close()
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.comments['FOO'] = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.comments['FOO'] == 'QUX'
hdul[0].header.add_comment(0, after='FOO')
assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0'
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad('HISTORY ' + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad('COMMENT ' + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value.")
assert (c.value == 'card has no comments. '
'/ text after slash is still part of the value.')
assert c.comment == ''
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card('', ' / EXPOSURE INFORMATION')
assert str(c) == _pad(' / EXPOSURE INFORMATION')
c = fits.Card.fromstring(str(c))
assert c.keyword == ''
assert c.value == ' / EXPOSURE INFORMATION'
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring('ABC = (8, 9)')
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring('abc = + 2.1 e + 12')
assert c.value == 2100000000000.0
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
# fixable non-FSC: if the card is not parsable, its value will be assumed
# to be a string and everything after the first slash will be comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes "
"/ let's also try the comment")
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert (str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment ")
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring('ABC = ')
assert str(c) == _pad("ABC =")
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring('XYZ= 100')
assert c.keyword == 'XYZ'
assert c.value == 100
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning,
match='header keyword is invalid'):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning,
match='header keyword is invalid'):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring('ABC= a6')
with catch_warnings() as w:
c.verify()
err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at "
"column 8)")
err_text2 = ("Card 'ABC' is not FITS standard (invalid value "
"string: 'a6'")
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
c = fits.Card.fromstring('ABC= a6')
with catch_warnings() as w:
c.verify('fix')
fix_text = "Fixed 'ABC' card to meet the FITS standard."
assert len(w) == 4
assert fix_text in str(w[1].message)
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
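# Long values are split across CONTINUE cards; each continued card ends its string with '&'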
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1['TEST'] = 'abcdefg' * 30
h2 = fits.Header()
with catch_warnings() as w:
h2['TEST'] = 'abcdefg' * 30
assert len(w) == 0
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header['TEST1'] = ('Regular value', 'Regular comment')
header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)
header['TEST3'] = ('Regular value', 'Regular comment')
assert (repr(header).splitlines() ==
[str(fits.Card('TEST1', 'Regular value', 'Regular comment')),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card('TEST3', 'Regular value', 'Regular comment'))])
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = 'long string value ' * 10
header = fits.Header()
header[''] = value
assert len(header) == 3
assert ' '.join(header['']) == value.rstrip()
# Ensure that this works like other commentary keywords
header['COMMENT'] = value
header['HISTORY'] = value
assert header['COMMENT'] == header['HISTORY']
assert header['COMMENT'] == header['']
def test_long_string_from_file(self):
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
c = hdul[0].header.cards['abc']
hdul.close()
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)
assert (str(c) ==
"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment ")
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' "
"/ comments in line 1") +
_pad("continue 'continue with long string but without the "
"ampersand at the end' /") +
_pad("continue 'continue must have string value (with quotes)' "
"/ comments with ''. "))
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert (str(c) ==
"ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. ")
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") +
_pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") +
_pad("CONTINUE '&' / pysyn expression"))
assert c.keyword == 'EXPR'
assert (c.value ==
'/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '
'* 5.87359e-12 * MWAvg(Av=0.12)')
assert c.comment == 'pysyn expression'
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h['SVALUE'] = 'A' * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10)
assert (str(c) ==
"TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & ")
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with catch_warnings() as w:
c = fits.Card('ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert len(w) == 1
assert 'HIERARCH card will be created' in str(w[0].message)
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
# Test manual creation of hierarch card
c = fits.Card('hierarch abcdefghi', 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring(
"HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
# Test also with creation via the Card constructor
c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment')
# This should not raise any exceptions
c.verify('exception')
assert c.keyword == 'WeirdCard.~!@#_^$%&'
assert c.value == 'The value'
assert c.comment == 'a comment'
# Test also the specific case from the original bug report
header = fits.Header([
('simple', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True, 'May contain datasets'),
('HIERARCH key.META_0', 'detRow')
])
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
header2 = hdul[0].header
assert (str(header.cards[header.index('key.META_0')]) ==
str(header2.cards[header2.index('key.META_0')]))
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
pytest.raises(KeyError, lambda k: header[k], 'NAXIS')
# Test the exception message
try:
header['NAXIS']
except KeyError as e:
assert e.args[0] == "Keyword 'NAXIS' not found."
def test_hierarch_card_lookup(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
assert 'abcdefghi' in header
assert header['abcdefghi'] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert 'ABCDEFGHI' in header
def test_hierarch_card_delete(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
del header['hierarch abcdefghi']
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(fits.verify.VerifyWarning,
match='greater than 8 characters'):
header['abcdefghi'] = 10
header['abcdefgh'] = 10
header['abcdefg'] = 10
with pytest.warns(fits.verify.VerifyWarning,
match='greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header['abcdefghij']
with pytest.warns(fits.verify.VerifyWarning,
match='greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header[2]
assert list(header.keys())[2] == 'abcdefg'.upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with catch_warnings(VerifyWarning) as w:
header.update({'HIERARCH BLAH BLAH': 'TESTA'})
assert len(w) == 0
assert 'BLAH BLAH' in header
assert header['BLAH BLAH'] == 'TESTA'
header.update({'HIERARCH BLAH BLAH': 'TESTB'})
assert len(w) == 0
assert header['BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH': 'TESTC'})
assert len(w) == 1
assert len(header) == 1
assert header['BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['blah blah'] == 'TESTD'
header.update({'blah blah': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['blah blah'] == 'TESTE'
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({'BLAH BLAH BLAH': 'TESTA'})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'})
assert len(w) == 3
assert header['BLAH BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH BLAH': 'TESTC'})
assert len(w) == 4
assert header['BLAH BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah blah': 'TESTD'})
assert len(w) == 4
assert header['blah blah blah'] == 'TESTD'
header.update({'blah blah blah': 'TESTE'})
assert len(w) == 5
assert header['blah blah blah'] == 'TESTE'
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with catch_warnings(VerifyWarning) as w:
header.update({'HIERARCH BLA BLA': 'TESTA'})
assert len(w) == 0
assert 'BLA BLA' in header
assert header['BLA BLA'] == 'TESTA'
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 0
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 1
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
header = fits.Header()
with catch_warnings(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({'BLA BLA': 'TESTA'})
print([x.category for x in w])
assert len(w) == 1
assert msg in str(w[0].message)
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 1
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 2
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 3
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header['FOO'] = ('bar', 'baz', 'qux')
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header['FOO'] = ('BAR',)
header['FOO2'] = (None,)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == ''
assert header.comments['FOO'] == ''
def test_header_setitem_2tuple(self):
header = fits.Header()
header['FOO'] = ('BAR', 'BAZ')
header['FOO2'] = (None, None)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == 'BAZ'
assert header.comments['FOO'] == 'BAZ'
assert header.comments['FOO2'] == ''
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header['FOO'] = 'BAR'
assert header['FOO'] == 'BAR'
header['FOO'] = None
assert header['FOO'] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep='\n')
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header['UNDEF3'] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header['DEFINED'] == 42
assert header['UNDEF'] is None
assert header['UNDEF2'] is None
assert header['UNDEF3'] is None
assert header['UNDEF5'] is None
assert header['UNDEF6'] is None
# Assign an undefined value to a new card
header['UNDEF4'] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([('A', 'B', 'C')])
header.set('A', comment='D')
assert header['A'] == 'B'
assert header.comments['A'] == 'D'
def test_header_iter(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header) == ['A', 'C']
def test_header_slice(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
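# Slicing a Header returns a new Header containing the selected cards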
newheader = header[1:]
assert len(newheader) == 2
assert 'A' not in newheader
assert 'C' in newheader
assert 'E' in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == 'F'
assert newheader[1] == 'D'
assert newheader[2] == 'B'
newheader = header[::2]
assert len(newheader) == 2
assert 'A' in newheader
assert 'C' not in newheader
assert 'E' in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = 'GH'
assert header[1] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header[1:] = ['H', 'I']
assert header[1] == 'H'
assert header[2] == 'I'
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
del header[1:]
assert len(header) == 1
assert header[0] == 'B'
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
newheader = header['AB*']
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
assert len(header['DATE*']) == 3
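# '?' matches exactly one character, so 'DATE?*' excludes the bare DATE keyword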
assert len(header['DATE?*']) == 2
assert len(header['DATE-*']) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header['AB*'] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header['AB*'] = 'GH'
assert header[0] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header['AB*'] = ['H', 'I']
assert header[0] == 'H'
assert header[2] == 'I'
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
del header['AB*']
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
assert header['HISTORY'] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
header.clear()
assert 'A' not in header
assert 'C' not in header
assert len(header) == 0
def test_header_fromkeys(self):
header = fits.Header.fromkeys(['A', 'B'])
assert 'A' in header
assert header['A'] is None
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] is None
assert header.comments['B'] == ''
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(['A', 'B'], 'C')
assert 'A' in header
assert header['A'] == 'C'
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == 'C'
assert header.comments['B'] == ''
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(['A'], ('B', 'C'))
assert 'A' in header
assert header['A'] == 'B'
assert header.comments['A'] == 'C'
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
assert 'A' in header
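# A (keyword, n) tuple indexes the n-th occurrence of a duplicated keyword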
assert ('A', 0) in header
assert ('A', 1) in header
assert ('A', 2) not in header
assert header[0] == 'C'
assert header['A'] == 'C'
assert header[('A', 0)] == 'C'
assert header[2] == 'C'
assert header[('A', 1)] == 'C'
def test_header_items(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header.items()) == [('A', 'B'), ('C', 'D')]
def test_header_iterkeys(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.values(), ['B', 'D']):
assert a == b
def test_header_keys(self):
with fits.open(self.data('arange.fits')) as hdul:
assert (list(hdul[0].header) ==
['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'EXTEND'])
def test_header_list_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
last = header.pop()
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop(1)
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop(0)
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
pytest.raises(TypeError, header.pop, 'A', 'B', 'C')
last = header.pop('G')
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop('C')
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop('A')
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
default = header.pop('X', 'Y')
assert default == 'Y'
assert len(header) == 1
pytest.raises(KeyError, header.pop, 'X')
def test_popitem(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.setdefault('A') == 'B'
assert header.setdefault('C') == 'D'
assert header.setdefault('E') == 'F'
assert len(header) == 3
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
assert 'G' in header
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update({'A': 'E', 'F': 'G'})
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update(A='E', F='G')
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update([('A', 'E'), fits.Card('F', 'G')])
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header['MYKEY'] = ('some val', 'some comment')
hdu.header += hdu2.header
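# By default extend/+= strips structural cards (XTENSION, BITPIX, etc.) from
# the incoming header, so only MYKEY is actually appended here.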
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == 'XTENSION'
assert hdu.header[-1] == 'some val'
assert ('MYKEY', 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == 'some val'
assert hdu.header[-1] == 'some other val'
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu2.header['HISTORY'] = 'history 1'
hdu2.header['HISTORY'] = 'history 2'
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) in hdu.header
assert hdu.header[('MYKEY', 1)] == 'some other val'
assert len(hdu.header['HISTORY']) == 3
assert hdu.header[-1] == 'history 2'
hdu = fits.PrimaryHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu.header.extend(hdu2.header, update=True)
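# With update=True the existing MYKEY card is updated in place rather than
# duplicated, and only HISTORY entries not already present are appended.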
assert len(hdu.header) == 7
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) not in hdu.header
assert hdu.header['MYKEY'] == 'some other val'
assert len(hdu.header['HISTORY']) == 2
assert hdu.header[-1] == 'history 2'
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data('test0.fits'))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.count('A') == 1
assert header.count('C') == 1
assert header.count('E') == 1
header['HISTORY'] = 'a'
header['HISTORY'] = 'b'
assert header.count('HISTORY') == 2
pytest.raises(KeyError, header.count, 'G')
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ''
assert header[-2] == ''
# New card should fill the first blank by default
header.append(('E', 'F'))
assert len(header) == 4
assert header[-2] == 'F'
assert header[-1] == ''
# This card should not use up a blank spot
header.append(('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[-1] == ''
assert header[-2] == 'H'
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.append('E')
assert len(header) == 3
assert list(header)[-1] == 'E'
assert header[-1] is None
assert header.comments['E'] == ''
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append('')
assert len(header) == 4
assert list(header)[-1] == ''
assert header[''] == ''
assert header.comments[''] == ''
def test_header_insert_use_blanks(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ('E', 'F'))
assert len(header) == 4
assert header[1] == 'F'
assert header[-1] == ''
assert header[-2] == 'D'
# Insert a new card without using blanks
header.insert(1, ('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[1] == 'H'
assert header[-1] == ''
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header([
('NAXIS1', 10), ('COMMENT', 'Comment 1'),
('COMMENT', 'Comment 3')])
header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
assert list(header.keys())[0] == 'NAXIS'
assert header[0] == 2
assert header.comments[0] == 'Number of axes'
header.insert('NAXIS1', ('NAXIS2', 20), after=True)
assert list(header.keys())[1] == 'NAXIS1'
assert list(header.keys())[2] == 'NAXIS2'
assert header[2] == 20
header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']
header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
'Comment 4']
header.insert(-1, ('TEST1', True))
assert list(header.keys())[-2] == 'TEST1'
header.insert(-1, ('TEST2', True), after=True)
assert list(header.keys())[-1] == 'TEST2'
assert list(header.keys())[-3] == 'TEST1'
def test_remove(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# When keyword is present in the header it should be removed.
header.remove('C')
assert len(header) == 1
assert list(header) == ['A']
assert 'C' not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove('F')
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove('F', ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')])
header.remove('A', remove_all=True)
assert 'A' not in header
assert len(header) == 1
assert list(header) == ['C']
assert header[0] == 'D'
def test_header_comments(self):
header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
assert (repr(header.comments) ==
' A C\n'
' DEF H')
def test_comment_slices_and_filters(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
s = header.comments[1:]
assert list(s) == ['H', 'K']
s = header.comments[::-1]
assert list(s) == ['K', 'H', 'D']
s = header.comments['A*']
assert list(s) == ['D', 'K']
def test_comment_slice_filter_assign(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
header.comments[1:] = 'L'
assert list(header.comments) == ['D', 'L', 'L']
assert header.cards[header.index('AB')].comment == 'D'
assert header.cards[header.index('EF')].comment == 'L'
assert header.cards[header.index('AI')].comment == 'L'
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ['L', 'L', 'D']
header.comments['A*'] = ['M', 'N']
assert list(header.comments) == ['M', 'L', 'N']
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header['HISTORY'] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header['HISTORY'][1:] == indices[1:]
assert header['HISTORY'][:3] == indices[:3]
assert header['HISTORY'][:6] == indices[:6]
assert header['HISTORY'][:-2] == indices[:-2]
assert header['HISTORY'][::-1] == indices[::-1]
assert header['HISTORY'][1::-1] == indices[1::-1]
assert header['HISTORY'][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ('A', 'B', 'C'))
header.append(('D', 'E', 'F'), end=True)
assert list(header['HISTORY'][1:]) == indices[1:]
assert list(header['HISTORY'][:3]) == indices[:3]
assert list(header['HISTORY'][:6]) == indices[:6]
assert list(header['HISTORY'][:-2]) == indices[:-2]
assert list(header['HISTORY'][::-1]) == indices[::-1]
assert list(header['HISTORY'][1::-1]) == indices[1::-1]
assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['HISTORY'] = 'ABC'
header['FRED'] = 'BARNEY'
header['HISTORY'] = 'DEF'
header['HISTORY'] = 'GHI'
assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']
# Single value update
header['HISTORY'][0] = 'FOO'
assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']
# Single value partial slice update
header['HISTORY'][1:] = 'BAR'
assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']
# Multi-value update
header['HISTORY'][:] = ['BAZ', 'QUX']
assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header['HISTORY'] = 'hello world'
header['HISTORY'] = 'hello world'
header['COMMENT'] = 'hello world'
assert header['HISTORY'] != header['COMMENT']
header['COMMENT'] = 'hello world'
assert header['HISTORY'] == header['COMMENT']
def test_long_commentary_card(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['BAZ'] = 'QUX'
longval = 'ABC' * 30
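# The 90-character value does not fit in the space available to a single
# commentary card, so it is split across two consecutive HISTORY cards.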
header['HISTORY'] = longval
header['FRED'] = 'BARNEY'
header['HISTORY'] = longval
assert len(header) == 7
assert list(header)[2] == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.set('HISTORY', longval, after='FOO')
assert len(header) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
header = fits.Header()
header.update({'FOO': 'BAR'})
header.update({'BAZ': 'QUX'})
longval = 'ABC' * 30
header.add_history(longval)
header.update({'FRED': 'BARNEY'})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.add_history(longval, after='FOO')
assert len(header.cards) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
def test_totxtfile(self):
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header.totextfile(self.temp('header.txt'))
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True, strip=False)
assert 'MYKEY' in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
with ignore_warnings():
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711
"""
filename = self.data('scale.fits')
hdr = fits.Header.fromfile(filename)
assert hdr['DATASET'] == '2MASS'
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header['A'] = ('B', 'C')
header['B'] = ('C', 'D')
header['C'] = ('D', 'E')
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file, the END card is
ignored.
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
f.write('\nEND')
new_header = fits.Header.fromtextfile(self.temp('test.hdr'))
assert 'END' not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, 'END', '')
pytest.raises(ValueError, header.append, 'END')
pytest.raises(ValueError, header.append, 'END', end=True)
pytest.raises(ValueError, header.insert, len(header), 'END')
pytest.raises(ValueError, header.set, 'END')
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep='', endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += ' ' * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header('END =', True)
with catch_warnings() as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Unexpected bytes trailing END keyword: ' ='")
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header('END = ', True)
with catch_warnings() as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Unexpected bytes trailing END keyword: ' ='")
# END card with trailing gibberish
s = invalid_header('END$%&%^*%*', True)
with catch_warnings() as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Unexpected bytes trailing END keyword: '$%&%^*%*'")
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header('END', False)
with catch_warnings() as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
assert str(w[0].message).startswith(
"Missing padding to end of the FITS block")
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h['FOO'] = 'BAR'
h['COMMENT'] = 'hello'
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
out = f.read()
out = out.replace(b'hello', u'héllo'.encode('latin1'))
out = out.replace(b'BAR', u'BÀR'.encode('latin1'))
with open(self.temp('test2.fits'), 'wb') as f2:
f2.write(out)
with catch_warnings() as w:
h = fits.getheader(self.temp('test2.fits'))
assert h['FOO'] == 'B?R'
assert h['COMMENT'] == 'h?llo'
assert len(w) == 1
assert str(w[0].message).startswith(
"non-ASCII characters are present in the FITS file")
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after=0)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before='C')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after='A')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set('C', before=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('C', after=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep='\n')
# First the case that *does* work prior to fixing this issue
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep='\n')
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h['FOCALLEN'] = 155.0
h['APERTURE'] = 0.0
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header['TEST'] = 5.0022221e-07
hdu.writeto(self.temp('test.fits'))
# Here we manually make the file invalid
with open(self.temp('test.fits'), 'rb+') as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii('e'))
hdul = fits.open(self.temp('test.fits'))
with catch_warnings() as w:
hdul.writeto(self.temp('temp.fits'), output_verify='warn')
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO = T')
barimg = _pad('BAR = F')
h = fits.Header()
h['FOO'] = True
h['BAR'] = False
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h['FOO'] = np.bool_(True)
h['BAR'] = np.bool_(False)
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
assert list(h) == ['ABC', 'DEF', 'GEH']
assert 'abc' in h
assert 'dEf' in h
assert h['geh'] == 3
# Case insensitivity of wildcards
assert len(h['g*']) == 1
h['aBc'] = 2
assert h['abc'] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h['gEh']
assert list(h) == ['ABC', 'DEF']
assert len(h) == 2
assert h.get('def') == 2
h.set('Abc', 3)
assert h['ABC'] == 3
h.set('gEh', 3, before='Abc')
assert list(h) == ['GEH', 'ABC', 'DEF']
assert h.pop('abC') == 3
assert len(h) == 2
assert h.setdefault('def', 3) == 2
assert len(h) == 2
assert h.setdefault('aBc', 1) == 1
assert len(h) == 3
assert list(h) == ['GEH', 'DEF', 'ABC']
h.update({'GeH': 1, 'iJk': 4})
assert len(h) == 4
assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
assert h['GEH'] == 1
assert h.count('ijk') == 1
assert h.index('ijk') == 3
h.remove('Def')
assert len(h) == 3
assert list(h) == ['GEH', 'ABC', 'IJK']
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header['TESTKW'] = ('Test val', 'This is the END')
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
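# (each 2880-byte FITS block holds 36 eighty-character cards)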
while len(hdu.header) < 36:
hdu.header.append()
with ignore_warnings():
hdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp('test.hdr'))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = '\u30a8\u30ea\u30c3\u30af'
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h['FOO'] = 'BAR'
assert 'FOO' in h
assert h['FOO'] == 'BAR'
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, 'BAR')
h['FOO'] = 'BAZ'
assert h['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, 'FOO', erikku)
h['FOO'] = ('BAR', 'BAZ')
assert h['FOO'] == 'BAR'
assert h.comments['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku))
pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
pytest.raises(ValueError, assign, 'FOO', (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
While test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, it should not be
possible to assign bytes to a header at all.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, 'TEST',
bytes('Hello', encoding='ascii'))
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for its solution: optional stripping of whitespace from the end of a
header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h['FOO'] = 'Bar '
assert h['FOO'] == 'Bar'
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp('strip_header_whitespace', False):
assert h['FOO'] == 'Bar '
assert h['QUX'] == 'Bar '
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
assert h['FOO'] == 'Bar'
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = ['CCD parameters table ...',
' reference table oref$n951041ko_ccd.fits',
' INFLIGHT 12/07/2001 25/02/2002',
' all bias frames'] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header['HISTORY'] = item
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
new_hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
c2 = fits.Card.fromstring('Just some random text.')
c3 = fits.Card.fromstring('A' * 80)
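# When parsed, the first eight characters of each malformed card become a
# keyword that cannot be reassigned, and the rest becomes the value, as the
# assertions below check.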
hdu = fits.PrimaryHDU()
# This should work with some warnings
with catch_warnings() as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp('test.fits'))
with catch_warnings() as w:
with fits.open(self.temp('test.fits')) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert 'CLFIND2D' in header
assert 'Just som' in header
assert 'AAAAAAAA' in header
assert header['CLFIND2D'] == ': contour = 0.30'
assert header['Just som'] == 'e random text.'
assert header['AAAAAAAA'] == 'A' * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo')
pytest.raises(ValueError, header.set, 'Just som', 'foo')
pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo')
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
c.verify('fix')
assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, 'TEST', float('nan'))
pytest.raises(ValueError, h.set, 'TEST', np.nan)
pytest.raises(ValueError, h.set, 'TEST', float('inf'))
pytest.raises(ValueError, h.set, 'TEST', np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header could not
be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([('TEST', True)])
h['TEST'] = 1
assert h['TEST'] is not True
assert isinstance(h['TEST'], int)
assert h['TEST'] == 1
h['TEST'] = np.bool_(True)
assert h['TEST'] is True
h['TEST'] = False
assert h['TEST'] is False
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
h['TEST'] = 0
assert h['TEST'] is not False
assert isinstance(h['TEST'], int)
assert h['TEST'] == 0
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h['TEST'] = 1
# int -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# int -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# Now the same tests but with zeros
h['TEST'] = 0
# int -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
# int -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, 'HISTORY', '\n')
pytest.raises(ValueError, h.set, 'HISTORY', '\nabc')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\n')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\ndef')
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if '\n' in card_image:
pytest.raises(fits.VerifyError, c.verify, 'exception')
else:
c.verify('exception')
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data('test0.fits'), 'rb') as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data('test0.fits'))
assert pri_hdr['NAXIS'] == pri_hdr_from_bytes['NAXIS']
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<http://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
"""
def setup(self):
super().setup()
self._test_header = fits.Header()
self._test_header.set('DP1', 'NAXIS: 2')
self._test_header.set('DP1', 'AXIS.1: 1')
self._test_header.set('DP1', 'AXIS.2: 2')
self._test_header.set('DP1', 'NAUX: 2')
self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')
self._test_header.set('DP1', 'AUX.1.POWER.0: 1')
self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')
self._test_header.set('DP1', 'AUX.1.POWER.1: 1')
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
assert c.comment == 'A comment'
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.1
assert c.field_specifier == 'NAXIS'
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1', 'NAXIS: 2')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: 2.0')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: a')
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1.NAXIS', 2)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
with ignore_warnings():
c = fits.Card('DP1.NAXIS', 'a')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 'a'
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.comment == 'A comment'
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
c.field_specifier = 'NAXIS1'
assert c.field_specifier == 'NAXIS1'
assert c.keyword == 'DP1.NAXIS1'
assert c.value == 2.0
assert c.comment == 'A comment'
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set('abc.def', 1)
header.set('abc.DEF', 2)
assert header['abc.def'] == 1
assert header['ABC.def'] == 1
assert header['aBc.def'] == 1
assert header['ABC.DEF'] == 2
assert 'ABC.dEf' not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header['DP1'] == 'NAXIS: 2'
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header['DP1.NAXIS'] == 2.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
pytest.raises(KeyError, lambda k: self._test_header[k], 'DP1.AXIS.3')
# Test the exception message
try:
self._test_header['DP1.AXIS.3']
except KeyError as e:
assert e.args[0] == "Keyword 'DP1.AXIS.3' not found."
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header['DP1.NAXIS'] == 3.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
self._test_header['DP1.AXIS.1'] = 1.1
assert self._test_header['DP1.AXIS.1'] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h['D2IM1.EXTVER'] = 1
assert h['D2IM1.EXTVER'] == 1.0
h['D2IM1.EXTVER'] = 2
assert h['D2IM1.EXTVER'] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2'
c = fits.Card('DP1.NAXIS', 2)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',
after='DP1.AXIS.2')
assert self._test_header[3] == 1
assert self._test_header['DP1.AXIS.3'] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header['DP1.AXIS.1']
assert len(self._test_header) == 7
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.AXIS.2'
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header['DP1.AXIS.2']
assert len(self._test_header) == 6
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header['DP1.AXIS.*']
assert isinstance(cl, fits.Header)
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
cl = self._test_header['DP1.N*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'"])
cl = self._test_header['DP1.AUX...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl = self._test_header['DP?.NAXIS']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'"])
cl = self._test_header['DP1.A*S.*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header['DP1.A*...']
assert len(self._test_header) == 2
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header['DP1.A*...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl2 = cl['*.*AUX...']
assert ([str(c).strip() for c in cl2.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2']
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header['DP1.AXIS.*']
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h['HISTORY'] = 'AXIS.1: 2'
h['HISTORY'] = 'AXIS.2: 2'
assert 'HISTORY.AXIS' not in h
assert 'HISTORY.AXIS.1' not in h
assert 'HISTORY.AXIS.2' not in h
assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'HISTORY.Date' not in h
assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')
c = fits.Card.fromstring(
" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ''
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'FOO.Date' not in h
assert (str(h.cards[0]) ==
_pad("FOO = 'Date: 2012-09-19T13:58:53.756061'"))
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h['FOO'] == 'AXIS.1: 2'
assert h[('FOO', 1)] == 'AXIS.2: 4'
assert h['FOO.AXIS.1'] == 2.0
assert h['FOO.AXIS.2'] == 4.0
assert 'FOO.AXIS' not in h
assert 'FOO.AXIS.' not in h
assert 'FOO.' not in h
pytest.raises(KeyError, lambda: h['FOO.AXIS'])
pytest.raises(KeyError, lambda: h['FOO.AXIS.'])
pytest.raises(KeyError, lambda: h['FOO.'])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
# Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits'))
output = hf.parse(extensions=['AIPS FQ'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split('\n')) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1],
keywords=['EXTNAME', 'BITPIX'])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split('\n')) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=['NAXIS*'])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
# Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data('test0.fits'))
assert "EXTNAME = 'SCI" in hf.parse(extensions=['SCI,2'])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data('comp.fits'))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1],
compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1],
compressed=True)
hf.close()
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data('zerowidth.fits')
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=['AIPS FQ', 2, "4"])
assert len(mytable) == (len(fitsobj['AIPS FQ'].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header))
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=['AIPS FQ'])
assert np.all(mytable['filename'] == test_filename)
assert np.all(mytable['hdu'] == 'AIPS FQ')
assert mytable['value'][mytable['keyword'] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['EXTNAME'])
assert len(mytable) == 1
assert mytable['hdu'][0] == "AIPS FQ"
assert mytable['keyword'][0] == "EXTNAME"
assert mytable['value'][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=['DOES_NOT_EXIST'])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['DOES_NOT_EXIST'])
assert mytable is None
formatter.close()
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_hdu_writeto_mode(self, mode):
with open(self.temp('mode.fits'), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
b7126bcf506822b41a363de47b278fddbf0f76304eb8269df2bce33e422c0db8
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import bz2
import io
import mmap
import errno
import os
import pathlib
import warnings
import zipfile
from unittest.mock import patch
import pytest
import numpy as np
from . import FitsTestCase
from astropy.io.fits.convenience import _getext
from astropy.io.fits.diff import FITSDiff
from astropy.io.fits.file import _File, GZIP_MAGIC
from astropy.io import fits
from astropy.tests.helper import raises, catch_warnings, ignore_warnings
from astropy.utils.data import conf, get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils import data
class TestCore(FitsTestCase):
@raises(OSError)
def test_missing_file(self):
fits.open(self.temp('does-not-exist.fits'))
def test_filename_is_bytes_object(self):
with pytest.raises(TypeError):
fits.open(self.data('ascii.fits').encode())
def test_naxisj_check(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist[1].header['NAXIS3'] = 500
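# The extension data has no third axis, so 'silentfix' verification should
# quietly drop the bogus NAXIS3 card.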
assert 'NAXIS3' in hdulist[1].header
hdulist.verify('silentfix')
assert 'NAXIS3' not in hdulist[1].header
def test_byteswap(self):
p = fits.PrimaryHDU()
l = fits.HDUList()
n = np.zeros(3, dtype='i2')
n[0] = 1
n[1] = 60000
n[2] = 2
c = fits.Column(name='foo', format='i2', bscale=1, bzero=32768,
array=n)
t = fits.BinTableHDU.from_columns([c])
l.append(p)
l.append(t)
l.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as p:
assert p[1].data[1]['foo'] == 60000.0
def test_fits_file_path_object(self):
"""
Test that a FITS file can be opened when passed as a pathlib.Path object (#4412).
"""
fpath = pathlib.Path(get_pkg_data_filename('data/tdim.fits'))
with fits.open(fpath) as hdulist:
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
with fits.open(self.data('tdim.fits')) as hdulist2:
assert FITSDiff(hdulist2, hdulist).identical is True
def test_add_del_columns(self):
p = fits.ColDefs([])
p.add_col(fits.Column(name='FOO', format='3J'))
p.add_col(fits.Column(name='BAR', format='1I'))
assert p.names == ['FOO', 'BAR']
p.del_col('FOO')
assert p.names == ['BAR']
def test_add_del_columns2(self):
hdulist = fits.open(self.data('tb.fits'))
table = hdulist[1]
assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4')
assert table.columns.names == ['c1', 'c2', 'c3', 'c4']
table.columns.del_col(str('c1'))
assert table.data.dtype.names == ('c2', 'c3', 'c4')
assert table.columns.names == ['c2', 'c3', 'c4']
table.columns.del_col(str('c3'))
assert table.data.dtype.names == ('c2', 'c4')
assert table.columns.names == ['c2', 'c4']
table.columns.add_col(fits.Column(str('foo'), str('3J')))
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
hdulist.writeto(self.temp('test.fits'), overwrite=True)
with ignore_warnings():
# TODO: The warning raised by this test is actually indication of a
# bug and should *not* be ignored. But as it is a known issue we
# hide it for now. See
# https://github.com/spacetelescope/PyFITS/issues/44
with fits.open(self.temp('test.fits')) as hdulist:
table = hdulist[1]
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
def test_update_header_card(self):
"""A very basic test for the Header.update method--I'd like to add a
few more cases to this at some point.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
header['BITPIX'] = (16, comment)
assert 'BITPIX' in header
assert header['BITPIX'] == 16
assert header.comments['BITPIX'] == comment
header.update(BITPIX=32)
assert header['BITPIX'] == 32
assert header.comments['BITPIX'] == ''
def test_set_card_value(self):
"""Similar to test_update_header_card(), but tests the the
`header['FOO'] = 'bar'` method of updating card values.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
card = fits.Card.fromstring('BITPIX = 32 / {}'.format(comment))
header.append(card)
header['BITPIX'] = 32
assert 'BITPIX' in header
assert header['BITPIX'] == 32
assert header.cards[0].keyword == 'BITPIX'
assert header.cards[0].value == 32
assert header.cards[0].comment == comment
def test_uint(self):
filename = self.data('o4sp040b0_raw.fits')
with fits.open(filename, uint=False) as hdulist_f:
with fits.open(filename, uint=True) as hdulist_i:
assert hdulist_f[1].data.dtype == np.float32
assert hdulist_i[1].data.dtype == np.uint16
assert np.all(hdulist_f[1].data == hdulist_i[1].data)
def test_fix_missing_card_append(self):
hdu = fits.ImageHDU()
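        # With option='silentfix', req_cards should append the missing TESTKW
        # card using the supplied fix value 'foo' while still reporting the
        # problem in the returned error list.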
errs = hdu.req_cards('TESTKW', None, None, 'foo', 'silentfix', [])
assert len(errs) == 1
assert 'TESTKW' in hdu.header
assert hdu.header['TESTKW'] == 'foo'
assert hdu.header.cards[-1].keyword == 'TESTKW'
def test_fix_invalid_keyword_value(self):
hdu = fits.ImageHDU()
hdu.header['TESTKW'] = 'foo'
errs = hdu.req_cards('TESTKW', None,
lambda v: v == 'foo', 'foo', 'ignore', [])
assert len(errs) == 0
# Now try a test that will fail, and ensure that an error will be
# raised in 'exception' mode
errs = hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar',
'exception', [])
assert len(errs) == 1
assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
# See if fixing will work
hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'silentfix',
[])
assert hdu.header['TESTKW'] == 'bar'
@raises(fits.VerifyError)
def test_unfixable_missing_card(self):
class TestHDU(fits.hdu.base.NonstandardExtHDU):
def _verify(self, option='warn'):
errs = super()._verify(option)
hdu.req_cards('TESTKW', None, None, None, 'fix', errs)
return errs
@classmethod
def match_header(cls, header):
# Since creating this HDU class adds it to the registry we
# don't want the file reader to possibly think any actual
# HDU from a file should be handled by this class
return False
hdu = TestHDU(header=fits.Header())
hdu.verify('fix')
@raises(fits.VerifyError)
def test_exception_on_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header['XTENSION']
hdu.verify('exception')
def test_ignore_verification_error(self):
hdu = fits.ImageHDU()
# The default here would be to issue a warning; ensure that no warnings
# or exceptions are raised
with catch_warnings():
warnings.simplefilter('error')
del hdu.header['NAXIS']
try:
hdu.verify('ignore')
except Exception as exc:
self.fail('An exception occurred when the verification error '
'should have been ignored: {}'.format(exc))
# Make sure the error wasn't fixed either, silently or otherwise
assert 'NAXIS' not in hdu.header
@raises(ValueError)
def test_unrecognized_verify_option(self):
hdu = fits.ImageHDU()
hdu.verify('foobarbaz')
def test_errlist_basic(self):
# Just some tests to make sure that _ErrList is setup correctly.
# No arguments
error_list = fits.verify._ErrList()
assert error_list == []
        # Some contents - this does not exercise any real verification; it
        # just makes sure the contents are kept.
error_list = fits.verify._ErrList([1, 2, 3])
assert error_list == [1, 2, 3]
def test_combined_verify_options(self):
"""
Test verify options like fix+ignore.
"""
def make_invalid_hdu():
hdu = fits.ImageHDU()
# Add one keyword to the header that contains a fixable defect, and one
# with an unfixable defect
c1 = fits.Card.fromstring("test = ' test'")
c2 = fits.Card.fromstring("P.I. = ' Hubble'")
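        # The lowercase 'test' keyword in c1 is fixable (it can be upper-cased);
        # the 'P.I.' keyword in c2 contains characters that are illegal in a
        # FITS keyword name and cannot be fixed automatically.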
hdu.header.append(c1)
hdu.header.append(c2)
return hdu
# silentfix+ignore should be completely silent
hdu = make_invalid_hdu()
with catch_warnings():
warnings.simplefilter('error')
try:
hdu.verify('silentfix+ignore')
except Exception as exc:
self.fail('An exception occurred when the verification error '
'should have been ignored: {}'.format(exc))
# silentfix+warn should be quiet about the fixed HDU and only warn
# about the unfixable one
hdu = make_invalid_hdu()
with catch_warnings() as w:
hdu.verify('silentfix+warn')
assert len(w) == 4
assert 'Illegal keyword name' in str(w[2].message)
# silentfix+exception should only mention the unfixable error in the
# exception
hdu = make_invalid_hdu()
try:
hdu.verify('silentfix+exception')
except fits.VerifyError as exc:
assert 'Illegal keyword name' in str(exc)
assert 'not upper case' not in str(exc)
else:
self.fail('An exception should have been raised.')
# fix+ignore is not too useful, but it should warn about the fixed
# problems while saying nothing about the unfixable problems
hdu = make_invalid_hdu()
with catch_warnings() as w:
hdu.verify('fix+ignore')
assert len(w) == 4
assert 'not upper case' in str(w[2].message)
# fix+warn
hdu = make_invalid_hdu()
with catch_warnings() as w:
hdu.verify('fix+warn')
assert len(w) == 6
assert 'not upper case' in str(w[2].message)
assert 'Illegal keyword name' in str(w[4].message)
# fix+exception
hdu = make_invalid_hdu()
try:
hdu.verify('fix+exception')
except fits.VerifyError as exc:
assert 'Illegal keyword name' in str(exc)
assert 'not upper case' in str(exc)
else:
self.fail('An exception should have been raised.')
def test_getext(self):
"""
Test the various different ways of specifying an extension header in
the convenience functions.
"""
filename = self.data('test0.fits')
hl, ext = _getext(filename, 'readonly', 1)
assert ext == 1
hl.close()
pytest.raises(ValueError, _getext, filename, 'readonly',
1, 2)
pytest.raises(ValueError, _getext, filename, 'readonly',
(1, 2))
pytest.raises(ValueError, _getext, filename, 'readonly',
'sci', 'sci')
pytest.raises(TypeError, _getext, filename, 'readonly',
1, 2, 3)
hl, ext = _getext(filename, 'readonly', ext=1)
assert ext == 1
hl.close()
hl, ext = _getext(filename, 'readonly', ext=('sci', 2))
assert ext == ('sci', 2)
hl.close()
pytest.raises(TypeError, _getext, filename, 'readonly',
1, ext=('sci', 2), extver=3)
pytest.raises(TypeError, _getext, filename, 'readonly',
ext=('sci', 2), extver=3)
hl, ext = _getext(filename, 'readonly', 'sci')
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', 'sci', 1)
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', ('sci', 1))
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', 'sci',
extver=1, do_not_scale_image_data=True)
assert ext == ('sci', 1)
hl.close()
pytest.raises(TypeError, _getext, filename, 'readonly',
'sci', ext=1)
pytest.raises(TypeError, _getext, filename, 'readonly',
'sci', 1, extver=2)
hl, ext = _getext(filename, 'readonly', extname='sci')
assert ext == ('sci', 1)
hl.close()
hl, ext = _getext(filename, 'readonly', extname='sci',
extver=1)
assert ext == ('sci', 1)
hl.close()
pytest.raises(TypeError, _getext, filename, 'readonly',
extver=1)
def test_extension_name_case_sensitive(self):
"""
Tests that setting fits.conf.extension_name_case_sensitive at
runtime works.
"""
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
with fits.conf.set_temp('extension_name_case_sensitive', True):
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'sCi'
assert hdu.header['EXTNAME'] == 'sCi'
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
def test_hdu_fromstring(self):
"""
Tests creating a fully-formed HDU object from a string containing the
bytes of the HDU.
"""
infile = self.data('test0.fits')
outfile = self.temp('test.fits')
with open(infile, 'rb') as fin:
dat = fin.read()
offset = 0
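        # The offset of the primary HDU's data section plus its data size gives
        # the number of bytes spanned by that HDU from the start of the file,
        # which is used to slice its raw bytes out of the file contents.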
with fits.open(infile) as hdul:
hdulen = hdul[0]._data_offset + hdul[0]._data_size
hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header == hdu.header
assert hdu.data is None
hdu.header['TEST'] = 'TEST'
hdu.writeto(outfile)
with fits.open(outfile) as hdul:
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header[:-1] == hdu.header[:-1]
assert hdul[0].header['TEST'] == 'TEST'
assert hdu.data is None
        with fits.open(infile) as hdul:
for ext_hdu in hdul[1:]:
offset += hdulen
hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen])
assert isinstance(hdu, fits.ImageHDU)
assert ext_hdu.header == hdu.header
assert (ext_hdu.data == hdu.data).all()
def test_nonstandard_hdu(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
without prepending a superfluous and unwanted standard primary HDU.
"""
data = np.arange(100, dtype=np.uint8)
hdu = fits.PrimaryHDU(data=data)
hdu.header['SIMPLE'] = False
hdu.writeto(self.temp('test.fits'))
info = [(0, '', 1, 'NonstandardHDU', 5, (), '', '')]
with fits.open(self.temp('test.fits')) as hdul:
assert hdul.info(output=False) == info
# NonstandardHDUs just treat the data as an unspecified array of
# bytes. The first 100 bytes should match the original data we
# passed in...the rest should be zeros padding out the rest of the
# FITS block
assert (hdul[0].data[:100] == data).all()
assert (hdul[0].data[100:] == 0).all()
def test_extname(self):
"""Test getting/setting the EXTNAME of an HDU."""
h1 = fits.PrimaryHDU()
assert h1.name == 'PRIMARY'
# Normally a PRIMARY HDU should not have an EXTNAME, though it should
# have a default .name attribute
assert 'EXTNAME' not in h1.header
# The current version of the FITS standard does allow PRIMARY HDUs to
# have an EXTNAME, however.
h1.name = 'NOTREAL'
assert h1.name == 'NOTREAL'
assert h1.header.get('EXTNAME') == 'NOTREAL'
# Updating the EXTNAME in the header should update the .name
h1.header['EXTNAME'] = 'TOOREAL'
assert h1.name == 'TOOREAL'
# If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
# to the default
del h1.header['EXTNAME']
assert h1.name == 'PRIMARY'
# For extension HDUs the situation is a bit simpler:
h2 = fits.ImageHDU()
assert h2.name == ''
assert 'EXTNAME' not in h2.header
h2.name = 'HELLO'
assert h2.name == 'HELLO'
assert h2.header.get('EXTNAME') == 'HELLO'
h2.header['EXTNAME'] = 'GOODBYE'
assert h2.name == 'GOODBYE'
def test_extver_extlevel(self):
"""Test getting/setting the EXTVER and EXTLEVEL of and HDU."""
# EXTVER and EXTNAME work exactly the same; their semantics are, for
# now, to be inferred by the user. Although they should never be less
# than 1, the standard does not explicitly forbid any value so long as
# it's an integer
h1 = fits.PrimaryHDU()
assert h1.ver == 1
assert h1.level == 1
assert 'EXTVER' not in h1.header
assert 'EXTLEVEL' not in h1.header
h1.ver = 2
assert h1.header.get('EXTVER') == 2
h1.header['EXTVER'] = 3
assert h1.ver == 3
del h1.header['EXTVER']
        assert h1.ver == 1
h1.level = 2
assert h1.header.get('EXTLEVEL') == 2
h1.header['EXTLEVEL'] = 3
assert h1.level == 3
del h1.header['EXTLEVEL']
assert h1.level == 1
pytest.raises(TypeError, setattr, h1, 'ver', 'FOO')
pytest.raises(TypeError, setattr, h1, 'level', 'BAR')
def test_consecutive_writeto(self):
"""
Regression test for an issue where calling writeto twice on the same
HDUList could write a corrupted file.
https://github.com/spacetelescope/PyFITS/issues/40 is actually a
        particular instance of this problem, though it isn't unique to sys.stdout.
"""
with fits.open(self.data('test0.fits')) as hdul1:
# Add a bunch of header keywords so that the data will be forced to
# new offsets within the file:
for idx in range(40):
hdul1[1].header['TEST{}'.format(idx)] = 'test'
hdul1.writeto(self.temp('test1.fits'))
hdul1.writeto(self.temp('test2.fits'))
# Open a second handle to the original file and compare it to hdul1
# (We only compare part of the one header that was modified)
# Compare also with the second writeto output
with fits.open(self.data('test0.fits')) as hdul2:
with fits.open(self.temp('test2.fits')) as hdul3:
for hdul in (hdul1, hdul3):
for idx, hdus in enumerate(zip(hdul2, hdul)):
hdu2, hdu = hdus
if idx != 1:
assert hdu.header == hdu2.header
else:
assert (hdu2.header ==
hdu.header[:len(hdu2.header)])
assert np.all(hdu.data == hdu2.data)
class TestConvenienceFunctions(FitsTestCase):
def test_writeto(self):
"""
Simple test for writing a trivial header and some data to a file
with the `writeto()` convenience function.
"""
filename = self.temp('array.fits')
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(filename, data, header=header, overwrite=True)
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
def test_writeto_2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
Test of `writeto()` with a trivial header containing a single keyword.
"""
filename = self.temp('array.fits')
data = np.zeros((100, 100))
header = fits.Header()
header.set('CRPIX1', 1.)
fits.writeto(filename, data, header=header,
overwrite=True, output_verify='silentfix')
with fits.open(filename) as hdul:
assert len(hdul) == 1
assert (data == hdul[0].data).all()
assert 'CRPIX1' in hdul[0].header
assert hdul[0].header['CRPIX1'] == 1.0
class TestFileFunctions(FitsTestCase):
"""
Tests various basic I/O operations, specifically in the
astropy.io.fits.file._File class.
"""
def test_open_nonexistent(self):
"""Test that trying to open a non-existent file results in an
OSError (and not some other arbitrary exception).
"""
try:
fits.open(self.temp('foobar.fits'))
except OSError as e:
assert 'No such file or directory' in str(e)
# But opening in ostream or append mode should be okay, since they
# allow writing new files
for mode in ('ostream', 'append'):
with fits.open(self.temp('foobar.fits'), mode=mode) as h:
pass
assert os.path.exists(self.temp('foobar.fits'))
os.remove(self.temp('foobar.fits'))
def test_open_file_handle(self):
# Make sure we can open a FITS file from an open file handle
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle) as fitsfile:
pass
with open(self.temp('temp.fits'), 'wb') as handle:
with fits.open(handle, mode='ostream') as fitsfile:
pass
# Opening without explicitly specifying binary mode should fail
with pytest.raises(ValueError):
with open(self.data('test0.fits')) as handle:
with fits.open(handle) as fitsfile:
pass
# All of these read modes should fail
for mode in ['r', 'rt']:
with pytest.raises(ValueError):
with open(self.data('test0.fits'), mode=mode) as handle:
with fits.open(handle) as fitsfile:
pass
# These update or write modes should fail as well
for mode in ['w', 'wt', 'w+', 'wt+', 'r+', 'rt+',
'a', 'at', 'a+', 'at+']:
with pytest.raises(ValueError):
with open(self.temp('temp.fits'), mode=mode) as handle:
with fits.open(handle) as fitsfile:
pass
def test_fits_file_handle_mode_combo(self):
# This should work fine since no mode is given
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle) as fitsfile:
pass
# This should work fine since the modes are compatible
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle, mode='readonly') as fitsfile:
pass
# This should not work since the modes conflict
with pytest.raises(ValueError):
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle, mode='ostream') as fitsfile:
pass
def test_open_from_url(self):
import urllib.request
file_url = "file:///" + self.data('test0.fits')
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj) as fits_handle:
pass
# It will not be possible to write to a file that is from a URL object
for mode in ('ostream', 'append', 'update'):
with pytest.raises(ValueError):
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
pass
@pytest.mark.remote_data(source='astropy')
def test_open_from_remote_url(self):
import urllib.request
for dataurl in (conf.dataurl, conf.dataurl_mirror):
remote_url = '{}/{}'.format(dataurl, 'allsky/allsky_rosat.fits')
try:
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj) as fits_handle:
assert len(fits_handle) == 1
for mode in ('ostream', 'append', 'update'):
with pytest.raises(ValueError):
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
assert len(fits_handle) == 1
except (urllib.error.HTTPError, urllib.error.URLError):
continue
else:
break
else:
raise Exception("Could not download file")
def test_open_gzipped(self):
gzip_file = self._make_gzip_file()
with ignore_warnings():
with fits.open(gzip_file) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
with fits.open(gzip.GzipFile(gzip_file)) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
def test_open_gzipped_from_handle(self):
with open(self._make_gzip_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'gzip'
def test_detect_gzipped(self):
"""Test detection of a gzip file when the extension is not .gz."""
with ignore_warnings():
with fits.open(self._make_gzip_file('test0.fz')) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
def test_writeto_append_mode_gzip(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/33
Check that a new GzipFile opened in append mode can be used to write
out a new FITS file.
"""
# Note: when opening a GzipFile the 'b+' is superfluous, but this was
# still how the original test case looked
# Note: with statement not supported on GzipFile in older Python
# versions
fileobj = gzip.GzipFile(self.temp('test.fits.gz'), 'ab+')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.gz')) as hdul:
assert hdul[0].header == h.header
def test_fits_update_mode_gzip(self):
"""Test updating a GZipped FITS file"""
with fits.open(self._make_gzip_file('update.gz'), mode='update') as fits_handle:
hdu = fits.ImageHDU(data=[x for x in range(100)])
fits_handle.append(hdu)
with fits.open(self.temp('update.gz')) as new_handle:
assert len(new_handle) == 6
assert (new_handle[-1].data == [x for x in range(100)]).all()
def test_fits_append_mode_gzip(self):
"""Make sure that attempting to open an existing GZipped FITS file in
'append' mode raises an error"""
with pytest.raises(OSError):
with fits.open(self._make_gzip_file('append.gz'), mode='append') as fits_handle:
pass
def test_open_bzipped(self):
bzip_file = self._make_bzip2_file()
with ignore_warnings():
with fits.open(bzip_file) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
with fits.open(bz2.BZ2File(bzip_file)) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
def test_open_bzipped_from_handle(self):
with open(self._make_bzip2_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
def test_detect_bzipped(self):
"""Test detection of a bzip2 file when the extension is not .bz2."""
with ignore_warnings():
with fits.open(self._make_bzip2_file('test0.xx')) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
def test_writeto_bzip2_fileobj(self):
"""Test writing to a bz2.BZ2File file like object"""
fileobj = bz2.BZ2File(self.temp('test.fits.bz2'), 'w')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.bz2')) as hdul:
assert hdul[0].header == h.header
def test_writeto_bzip2_filename(self):
"""Test writing to a bzip2 file by name"""
filename = self.temp('testname.fits.bz2')
h = fits.PrimaryHDU()
h.writeto(filename)
with fits.open(self.temp('testname.fits.bz2')) as hdul:
assert hdul[0].header == h.header
def test_open_zipped(self):
zip_file = self._make_zip_file()
with ignore_warnings():
with fits.open(zip_file) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
with fits.open(zipfile.ZipFile(zip_file)) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
def test_open_zipped_from_handle(self):
with open(self._make_zip_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
def test_detect_zipped(self):
"""Test detection of a zip file when the extension is not .zip."""
zf = self._make_zip_file(filename='test0.fz')
with ignore_warnings():
assert len(fits.open(zf)) == 5
def test_open_zipped_writeable(self):
"""Opening zipped files in a writeable mode should fail."""
zf = self._make_zip_file()
pytest.raises(OSError, fits.open, zf, 'update')
pytest.raises(OSError, fits.open, zf, 'append')
zf = zipfile.ZipFile(zf, 'a')
pytest.raises(OSError, fits.open, zf, 'update')
pytest.raises(OSError, fits.open, zf, 'append')
def test_read_open_astropy_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2774
This tests reading from a ``GzipFile`` object from Astropy's
compatibility copy of the ``gzip`` module.
"""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
@raises(OSError)
def test_open_multiple_member_zipfile(self):
"""
        Opening zip files containing more than one member file should fail
as there's no obvious way to specify which file is the FITS file to
read.
"""
zfile = zipfile.ZipFile(self.temp('test0.zip'), 'w')
zfile.write(self.data('test0.fits'))
zfile.writestr('foo', 'bar')
zfile.close()
fits.open(zfile.filename)
def test_read_open_file(self):
"""Read from an existing file object."""
with open(self.data('test0.fits'), 'rb') as f:
assert len(fits.open(f)) == 5
def test_read_closed_file(self):
"""Read from an existing file object that's been closed."""
f = open(self.data('test0.fits'), 'rb')
f.close()
with fits.open(f) as f2:
assert len(f2) == 5
def test_read_open_gzip_file(self):
"""Read from an open gzip file object."""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_gzip_file_for_writing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
gf = self._make_gzip_file()
with fits.open(gf, mode='update') as h:
h[0].header['EXPFLAG'] = 'ABNORMAL'
h[1].data[0, 0] = 1
with fits.open(gf) as h:
            # Just to make sure the update worked; if updates work
# normal writes should work too...
assert h[0].header['EXPFLAG'] == 'ABNORMAL'
assert h[1].data[0, 0] == 1
def test_write_read_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2794
Ensure files written through gzip are readable.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
hdu.writeto(self.temp('test.fits.gz'))
with open(self.temp('test.fits.gz'), 'rb') as f:
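            # The file should start with the gzip magic bytes, confirming that
            # the .gz suffix triggered transparent gzip compression on write.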
assert f.read(3) == GZIP_MAGIC
with fits.open(self.temp('test.fits.gz')) as hdul:
assert np.all(hdul[0].data == data)
def test_read_file_like_object(self):
"""Test reading a FITS file from a file-like object."""
filelike = io.BytesIO()
with open(self.data('test0.fits'), 'rb') as f:
filelike.write(f.read())
filelike.seek(0)
with ignore_warnings():
assert len(fits.open(filelike)) == 5
def test_updated_file_permissions(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
Tests that when a FITS file is modified in update mode, the file
permissions are preserved.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul.writeto(filename)
old_mode = os.stat(filename).st_mode
hdul = fits.open(filename, mode='update')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
hdul.close()
assert old_mode == os.stat(filename).st_mode
def test_fileobj_mode_guessing(self):
"""Tests whether a file opened without a specified io.fits mode
('readonly', etc.) is opened in a mode appropriate for the given file
object.
"""
self.copy_file('test0.fits')
# Opening in text mode should outright fail
for mode in ('r', 'w', 'a'):
with open(self.temp('test0.fits'), mode) as f:
pytest.raises(ValueError, fits.HDUList.fromfile, f)
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'readonly'
for mode in ('wb', 'ab'):
with open(self.temp('test0.fits'), mode) as f:
with fits.HDUList.fromfile(f) as h:
# Basically opening empty files for output streaming
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'wb+') as f:
with fits.HDUList.fromfile(f) as h:
# wb+ still causes an existing file to be overwritten so there
# are no HDUs
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'update'
with open(self.temp('test0.fits'), 'ab+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'append'
def test_mmap_unwriteable(self):
"""Regression test for https://github.com/astropy/astropy/issues/968
Temporarily patches mmap.mmap to exhibit platform-specific bad
behavior.
"""
class MockMmap(mmap.mmap):
def flush(self):
raise OSError('flush is broken on this platform')
old_mmap = mmap.mmap
mmap.mmap = MockMmap
# Force the mmap test to be rerun
_File.__dict__['_mmap_available']._cache.clear()
try:
self.copy_file('test0.fits')
with catch_warnings() as w:
with fits.open(self.temp('test0.fits'), mode='update',
memmap=True) as h:
h[1].data[0, 0] = 999
assert len(w) == 1
assert 'mmap.flush is unavailable' in str(w[0].message)
# Double check that writing without mmap still worked
with fits.open(self.temp('test0.fits')) as h:
assert h[1].data[0, 0] == 999
finally:
mmap.mmap = old_mmap
_File.__dict__['_mmap_available']._cache.clear()
@pytest.mark.openfiles_ignore
def test_mmap_allocate_error(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1380
Temporarily patches mmap.mmap to raise an OSError if mode is ACCESS_COPY.
"""
mmap_original = mmap.mmap
# We patch mmap here to raise an error if access=mmap.ACCESS_COPY, which
# emulates an issue that an OSError is raised if the available address
# space is less than the size of the file even if memory mapping is used.
def mmap_patched(*args, **kwargs):
if kwargs.get('access') == mmap.ACCESS_COPY:
exc = OSError()
exc.errno = errno.ENOMEM
raise exc
else:
return mmap_original(*args, **kwargs)
with fits.open(self.data('test0.fits'), memmap=True) as hdulist:
with patch.object(mmap, 'mmap', side_effect=mmap_patched) as p:
with pytest.warns(AstropyUserWarning, match="Could not memory "
"map array with mode='readonly'"):
data = hdulist[1].data
p.reset_mock()
assert not data.flags.writeable
def test_mmap_closing(self):
"""
Tests that the mmap reference is closed/removed when there aren't any
HDU data references left.
"""
if not _File._mmap_available:
pytest.xfail('not expected to work on platforms without mmap '
'support')
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
assert hdul._file._mmap is None
hdul[1].data
assert hdul._file._mmap is not None
del hdul[1].data
# Should be no more references to data in the file so close the
# mmap
assert hdul._file._mmap is None
hdul[1].data
hdul[2].data
del hdul[1].data
            # hdul[2].data is still referenced so keep the mmap open
assert hdul._file._mmap is not None
del hdul[2].data
assert hdul._file._mmap is None
assert hdul._file._mmap is None
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
hdul[1].data
# When the only reference to the data is on the hdu object, and the
# hdulist it belongs to has been closed, the mmap should be closed as
# well
assert hdul._file._mmap is None
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
data = hdul[1].data
# also make a copy
data_copy = data.copy()
# The HDUList is closed; in fact, get rid of it completely
del hdul
# The data array should still work though...
assert np.all(data == data_copy)
def test_uncloseable_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2356
Demonstrates that FITS files can still be read from "file-like" objects
that don't have an obvious "open" or "closed" state.
"""
class MyFileLike:
def __init__(self, foobar):
self._foobar = foobar
def read(self, n):
return self._foobar.read(n)
def seek(self, offset, whence=os.SEEK_SET):
self._foobar.seek(offset, whence)
def tell(self):
return self._foobar.tell()
with open(self.data('test0.fits'), 'rb') as f:
fileobj = MyFileLike(f)
with fits.open(fileobj) as hdul1:
with fits.open(self.data('test0.fits')) as hdul2:
assert hdul1.info(output=False) == hdul2.info(output=False)
for hdu1, hdu2 in zip(hdul1, hdul2):
assert hdu1.header == hdu2.header
if hdu1.data is not None and hdu2.data is not None:
assert np.all(hdu1.data == hdu2.data)
def test_write_bytesio_discontiguous(self):
"""
Regression test related to
https://github.com/astropy/astropy/issues/2794#issuecomment-55441539
Demonstrates that writing an HDU containing a discontiguous Numpy array
should work properly.
"""
data = np.arange(100)[::3]
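        # The step slice produces a non-contiguous view, which exercises the
        # discontiguous-array write path.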
hdu = fits.PrimaryHDU(data=data)
fileobj = io.BytesIO()
hdu.writeto(fileobj)
fileobj.seek(0)
with fits.open(fileobj) as h:
assert np.all(h[0].data == data)
def test_write_bytesio(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2463
        Test against `io.BytesIO`. `io.StringIO` is not supported.
"""
self._test_write_string_bytes_io(io.BytesIO())
@pytest.mark.skipif(str('sys.platform.startswith("win32")'))
def test_filename_with_colon(self):
"""
Test reading and writing a file with a colon in the filename.
Regression test for https://github.com/astropy/astropy/issues/3122
"""
# Skip on Windows since colons in filenames makes NTFS sad.
filename = 'APEXHET.2014-04-01T15:18:01.000.fits'
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert np.all(hdul[0].data == hdu.data)
def test_writeto_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to write an hdulist
to a full disk.
"""
def _writeto(self, array):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
with pytest.raises(OSError) as exc:
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
n = np.arange(0, 1000, dtype='int64')
hdu = fits.PrimaryHDU(n)
hdulist = fits.HDUList(hdu)
filename = self.temp('test.fits')
with open(filename, mode='wb') as fileobj:
hdulist.writeto(fileobj)
assert ("Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file.") == exc.value.args[0]
def test_flush_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to update an hdulist
to a full disk.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul[0].data = np.arange(0, 1000, dtype='int64')
hdul.writeto(filename)
def _writedata(self, fileobj):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata)
monkeypatch.setattr(data, "get_free_space_in_dir",
get_free_space_in_dir)
with pytest.raises(OSError) as exc:
with fits.open(filename, mode='update') as hdul:
hdul[0].data = np.arange(0, 1000, dtype='int64')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
assert ("Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file.") == exc.value.args[0]
def _test_write_string_bytes_io(self, fileobj):
"""
Implemented for both test_write_stringio and test_write_bytesio.
"""
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(fileobj)
hdul2 = fits.HDUList.fromstring(fileobj.getvalue())
assert FITSDiff(hdul, hdul2).identical
def _make_gzip_file(self, filename='test0.fits.gz'):
gzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
gz = gzip.open(gzfile, 'wb')
gz.write(f.read())
gz.close()
return gzfile
def _make_zip_file(self, mode='copyonwrite', filename='test0.fits.zip'):
zfile = zipfile.ZipFile(self.temp(filename), 'w')
zfile.write(self.data('test0.fits'))
zfile.close()
return zfile.filename
def _make_bzip2_file(self, filename='test0.fits.bz2'):
bzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
bz = bz2.BZ2File(bzfile, 'w')
bz.write(f.read())
bz.close()
return bzfile
class TestStreamingFunctions(FitsTestCase):
"""Test functionality of the StreamingHDU class."""
def test_streaming_hdu(self):
shdu = self._make_streaming_hdu(self.temp('new.fits'))
assert isinstance(shdu.size, int)
assert shdu.size == 100
shdu.close()
@raises(ValueError)
def test_streaming_hdu_file_wrong_mode(self):
"""
Test that streaming an HDU to a file opened in the wrong mode fails as
expected.
"""
with open(self.temp('new.fits'), 'wb') as f:
header = fits.Header()
fits.StreamingHDU(f, header)
def test_streaming_hdu_write_file(self):
"""Test streaming an HDU to an open file object."""
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
with fits.open(self.temp('new.fits')) as hdul:
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_write_file_like(self):
"""Test streaming an HDU to an open file-like object."""
arr = np.zeros((5, 5), dtype=np.int32)
# The file-like object underlying a StreamingHDU must be in binary mode
sf = io.BytesIO()
shdu = self._make_streaming_hdu(sf)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_append_extension(self):
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
# Doing this again should update the file with an extension
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
def test_fix_invalid_extname(self, capsys):
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU()
ihdu.header['EXTNAME'] = 12345678
hdul = fits.HDUList([phdu, ihdu])
filename = self.temp('temp.fits')
pytest.raises(fits.VerifyError, hdul.writeto, filename,
output_verify='exception')
with pytest.warns(fits.verify.VerifyWarning,
match='Verification reported errors'):
hdul.writeto(filename, output_verify='fix')
with fits.open(filename):
assert hdul[1].name == '12345678'
assert hdul[1].header['EXTNAME'] == '12345678'
hdul.close()
def _make_streaming_hdu(self, fileobj):
hd = fits.Header()
hd['SIMPLE'] = (True, 'conforms to FITS standard')
hd['BITPIX'] = (32, 'array data type')
hd['NAXIS'] = (2, 'number of array dimensions')
hd['NAXIS1'] = 5
hd['NAXIS2'] = 5
hd['EXTEND'] = True
return fits.StreamingHDU(fileobj, hd)
def test_blank_ignore(self):
with fits.open(self.data('blank.fits'), ignore_blank=True) as f:
assert f[0].data.flat[0] == 2
def test_error_if_memmap_impossible(self):
pth = self.data('blank.fits')
with fits.open(pth, memmap=True) as hdul:
with pytest.raises(ValueError):
hdul[0].data
# However, it should not fail if do_not_scale_image_data was used:
# See https://github.com/astropy/astropy/issues/3766
with fits.open(pth, memmap=True, do_not_scale_image_data=True) as hdul:
hdul[0].data # Just make sure it doesn't crash
|
805a6c25e95a6d7bdb5b252c62839c996b6e7233f7ca73906565d0d9b383e780 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from . import FitsTestCase
from astropy.io.fits.fitstime import GLOBAL_TIME_INFO, time_to_fits, is_time_column_keyword
from astropy.coordinates import EarthLocation
from astropy.io import fits
from astropy.table import Table, QTable
from astropy.time import Time, TimeDelta
from astropy.time.core import BARYCENTRIC_SCALES
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
class TestFitsTime(FitsTestCase):
def setup_class(self):
self.time = np.array(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'])
self.time_3d = np.array([[[1, 2], [1, 3], [3, 4]]])
def test_is_time_column_keyword(self):
# Time column keyword without column number
assert is_time_column_keyword('TRPOS') is False
# Global time column keyword
assert is_time_column_keyword('TIMESYS') is False
# Valid time column keyword
assert is_time_column_keyword('TRPOS12') is True
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_loc(self, table_types):
"""
Test all the unusual conditions for locations of ``Time``
columns in a ``Table``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t['b'] = Time(self.time, format='isot', scale='tt')
# Check that vectorized location is stored using Green Bank convention
t['a'].location = EarthLocation([1., 2.], [2., 3.], [3., 4.],
unit='Mm')
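        # Under the Green Bank convention the vectorized location is written as
        # OBSGEO-X/Y/Z columns (values converted to m) in the output table
        # rather than as single header keywords.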
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no '
'specified location, but global Time Position is present'):
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no '
'specified location, but global Time Position is present'):
t.write(self.temp('time.fits'), format='fits', overwrite=True)
with pytest.warns(fits.verify.VerifyWarning,
match='Invalid keyword for column 2'):
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
assert tm['b'].location == t['b'].location
# Check that multiple Time columns with different locations raise an exception
t['a'].location = EarthLocation(1, 2, 3)
t['b'].location = EarthLocation(2, 3, 4)
with pytest.raises(ValueError) as err:
table, hdr = time_to_fits(t)
assert 'Multiple Time Columns with different geocentric' in str(err.value)
# Check that Time column with no location specified will assume global location
t['b'].location = None
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
assert str(w[0].message).startswith('Time Column "b" has no specified '
'location, but global Time Position '
'is present')
# Check that multiple Time columns with same location can be written
t['b'].location = EarthLocation(1, 2, 3)
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 0
# Check compatibility of Time Scales and Reference Positions
for scale in BARYCENTRIC_SCALES:
t.replace_column('a', getattr(t['a'], scale))
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
assert str(w[0].message).startswith('Earth Location "TOPOCENTER" '
'for Time Column')
# Check that multidimensional vectorized location (ndim=3) is stored
# using Green Bank convention.
t = table_types()
location = EarthLocation([[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]], unit='Mm')
t['a'] = Time(self.time_3d, format='jd', location=location)
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
# Check that singular location with ndim>1 can be written
t['a'] = Time(self.time, location=EarthLocation([[[1.]]], [[[2.]]],
[[[3.]]], unit='Mm'))
table, hdr = time_to_fits(t)
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert tm['a'].location == t['a'].location
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_header(self, table_types):
"""
Test the header and metadata returned by ``time_to_fits``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(-2446354,
4237210, 4077985, unit='m'))
t['b'] = Time([1,2], format='cxcsec', scale='tt')
ideal_col_hdr = {'OBSGEO-X' : t['a'].location.x.value,
'OBSGEO-Y' : t['a'].location.y.value,
'OBSGEO-Z' : t['a'].location.z.value}
with pytest.warns(AstropyUserWarning, match='Time Column "b" has no '
'specified location, but global Time Position is present'):
table, hdr = time_to_fits(t)
# Check the global time keywords in hdr
for key, value in GLOBAL_TIME_INFO.items():
assert hdr[key] == value[0]
assert hdr.comments[key] == value[1]
hdr.remove(key)
for key, value in ideal_col_hdr.items():
assert hdr[key] == value
hdr.remove(key)
# Check the column-specific time metadata
coord_info = table.meta['__coordinate_columns__']
for colname in coord_info:
assert coord_info[colname]['coord_type'] == t[colname].scale.upper()
assert coord_info[colname]['coord_unit'] == 'd'
assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER'
assert len(hdr) == 0
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_fits_to_time_meta(self, table_types):
"""
Test that the relevant global time metadata is read into
``Table.meta`` as ``Time``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t.meta['DATE'] = '1999-01-01T00:00:00'
t.meta['MJD-OBS'] = 56670
# Test for default write behavior (full precision) and read it
# back using native astropy objects; thus, ensure its round-trip
t.write(self.temp('time.fits'), format='fits', overwrite=True)
with pytest.warns(fits.verify.VerifyWarning,
match='Invalid keyword for column 1'):
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE']
assert tm.meta['DATE'].format == 'fits'
# Default time scale according to the FITS standard is UTC
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].format == 'mjd'
assert tm.meta['MJD-OBS'].scale == 'utc'
# Explicitly specified Time Scale
t.meta['TIMESYS'] = 'ET'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
with pytest.warns(fits.verify.VerifyWarning,
match='Invalid keyword for column 1'):
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE']
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]
# Test for conversion of time data to its value, as defined by its format
t['a'].info.serialize_method['fits'] = 'formatted_value'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits')
# Test DATE
assert not isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'] == t.meta['DATE']
# Test MJD-xxx
assert not isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']
assert (tm['a'] == t['a'].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_loc_unit(self, table_types):
"""
Test that ``location`` specified by using any valid unit
(length/angle) in ``Time`` columns gets stored in FITS
as ITRS Cartesian coordinates (X, Y, Z), each in m.
Test that it round-trips through FITS.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(1,2,3, unit='km'))
table, hdr = time_to_fits(t)
# Check the header
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Check the round-trip of location
assert (tm['a'].location == t['a'].location).all()
assert tm['a'].location.x.value == t['a'].location.x.to_value(unit='m')
assert tm['a'].location.y.value == t['a'].location.y.to_value(unit='m')
assert tm['a'].location.z.value == t['a'].location.z.to_value(unit='m')
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits(self, table_types):
"""
Test that FITS table with time columns (standard compliant)
can be read by io.fits as a table with Time columns.
This tests the following:
1. The special-case where a column has the name 'TIME' and a
time unit
2. Time from Epoch (Reference time) is appropriately converted.
3. Coordinate columns (corresponding to coordinate keywords in the header)
other than time, that is, spatial coordinates, are not mistaken
to be time.
"""
filename = self.data('chandra_time.fits')
with pytest.warns(AstropyUserWarning, match='Time column "time" reference '
'position will be ignored'):
tm = table_types.read(filename, astropy_native=True)
# Test case 1
assert isinstance(tm['time'], Time)
assert tm['time'].scale == 'tt'
assert tm['time'].format == 'mjd'
non_native = table_types.read(filename)
# Test case 2
ref_time = Time(non_native.meta['MJDREF'], format='mjd',
scale=non_native.meta['TIMESYS'].lower())
delta_time = TimeDelta(non_native['time'])
assert (ref_time + delta_time == tm['time']).all()
# Test case 3
for colname in ['chipx', 'chipy', 'detx', 'dety', 'x', 'y']:
assert not isinstance(tm[colname], Time)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_datetime(self, table_types):
"""
Test that ISO-8601 Datetime String Columns are read correctly.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TCG',
time_ref_pos='GEOCENTER', array=self.time)
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].scale == 'tcg'
assert tm['datetime'].format == 'fits'
assert (tm['datetime'] == self.time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location(self, table_types):
"""
Test that geocentric/geodetic observatory position is read
properly, as and when it is specified.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
# Observatory position in ITRS Cartesian coordinates (geocentric)
cards = [('OBSGEO-X', -2446354), ('OBSGEO-Y', 4237210),
('OBSGEO-Z', 4077985)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.x.value == -2446354
assert tm['datetime'].location.y.value == 4237210
assert tm['datetime'].location.z.value == 4077985
# Observatory position in geodetic coordinates
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.lon.value == 0
assert tm['datetime'].location.lat.value == 0
assert np.isclose(tm['datetime'].location.height.value, 0,
rtol=0, atol=1e-9)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_scale(self, table_types):
"""
Test handling of 'GPS' and 'LOCAL' time scales which are
recognized by the FITS standard but are not native to astropy.
"""
# GPS scale column
gps_time = np.array([630720013, 630720014])
c = fits.Column(name='gps_time', format='D', unit='s', coord_type='GPS',
coord_unit='s', time_ref_pos='TOPOCENTER', array=gps_time)
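        # 'GPS' is not a native astropy scale; on read it should come back as
        # a Time column with format='gps' on the TAI scale (checked below).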
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert 'FITS recognized time scale value "GPS"' in str(w[0].message)
assert isinstance(tm['gps_time'], Time)
assert tm['gps_time'].format == 'gps'
assert tm['gps_time'].scale == 'tai'
assert (tm['gps_time'].value == gps_time).all()
# LOCAL scale column
local_time = np.array([1, 2])
c = fits.Column(name='local_time', format='D', unit='d',
coord_type='LOCAL', coord_unit='d',
time_ref_pos='RELOCATABLE', array=local_time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['local_time'], Time)
assert tm['local_time'].format == 'mjd'
assert tm['local_time'].scale == 'local'
assert (tm['local_time'].value == local_time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location_warnings(self, table_types):
"""
Test warnings for time column reference position.
"""
# Time reference position "TOPOCENTER" without corresponding
# observatory position.
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert ('observatory position is not properly specified' in
str(w[0].message))
# Default value for time reference position is "TOPOCENTER"
c = fits.Column(name='datetime', format='A29', coord_type='TT',
array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert ('"TRPOSn" is not specified. The default value for '
'it is "TOPOCENTER"' in str(w[0].message))
|
f86e0b1a1ef2d299f60429aafea1d6a8751609fcfc593888fd03823d23761bf6 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import platform
import re
import time
import warnings
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.tests.helper import catch_warnings, ignore_warnings
from astropy.io.fits.hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from .test_table import comparerecords
from . import FitsTestCase
try:
import scipy # noqa
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
        # data sections are lazily instantiated, so if we close the HDUList
        # without touching the data, the data can no longer be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3,7)
b = np.asfortranarray(a)
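        # b holds the same values as a but in Fortran (column-major) memory
        # order; both should produce identical FITS files when written.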
afits = self.temp('a_str.fits')
bfits = self.temp('b_str.fits')
        # writing to files specified by name (str)
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
        # writing to file objects
aafits = self.temp('a_fileobj.fits')
bbfits = self.temp('b_fileobj.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
        a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
        # writing to files specified by filename strings
afits = self.temp('a_str_slice.fits')
bfits = self.temp('b_str_slice.fits')
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
        # writing to file objects
aafits = self.temp('a_fileobj_slice.fits')
bbfits = self.temp('b_fileobj_slice.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
@pytest.mark.xfail(platform.system() == 'Windows',
reason='https://github.com/astropy/astropy/issues/5797')
def test_io_manipulation(self):
        # Get a keyword value. An extension can be referred to by name or by
        # number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
            # If there is more than one extension with the same EXTNAME value,
            # the EXTVER can be used (as the second argument) to distinguish
            # the extensions.
assert r['sci', 1].header['detector'] == 1
# append (using "update()") a new card
r[0].header['xxx'] = 1.234e56
assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_keyword('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'simple')
r[0].header.rename_keyword('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
                # When the file is closed, the most recent additions of
                # extension(s) since the last flush() will be appended, but any
                # HDU that already existed at the last flush will not be
                # modified
del n
            # If an existing file is opened with "append" mode, like the
            # readonly mode, the HDUs will be read into the HDUList, which can
            # be modified in memory but cannot be written back to the original
            # file. A file opened with append mode can only add new HDUs.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
                # If the changes affect the size structure, e.g. adding or
                # deleting HDU(s), expanding or reducing the header beyond the
                # existing number of blocks (2880 bytes each), or changing the
                # data size, the HDUList is written to a temporary file, the
                # original file is deleted, and the temporary file is renamed
                # to the original file name and reopened in update mode. To
                # the user these two kinds of update writeback look the same,
                # unless the optional argument in flush or close is set to 1.
del u[2]
u.flush()
                # The write method in the HDUList class writes the current
                # HDUList, with all changes made up to now, to a new file.
                # This method works the same regardless of the mode the
                # HDUList was opened with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
            # notice that the header has the right NAXIS keywords since it is
            # constructed with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
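        # With memmap=1 (True) the HDU data is memory-mapped from the file
        # rather than read fully into memory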
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
        # make a defective HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with catch_warnings() as w:
hdu.verify()
text = "HDUList's 0th element is not a primary HDU."
assert len(w) == 3
assert text in str(w[1].message)
with catch_warnings() as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
text = ("HDUList's 0th element is not a primary HDU. "
"Fixed by inserting one as 0th HDU.")
assert len(w) == 3
assert text in str(w[1].message)
def test_section(self):
# section testing
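        # The .section attribute reads only the requested subset of the data
        # from the file instead of loading the full array into memory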
fs = fits.open(self.data('arange.fits'))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
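        # Fancy indexing with a boolean array should also work through
        # .section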
bool_index = np.array([True, False, True, True, False,
False, True, True, False, True])
assert np.array_equal(fs[0].section[:, bool_index, :],
dat[:, bool_index, :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
        # TODO: Generate these permutations instead of having them all written
        # out, yeesh!
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :] == dat[0, :, :]).all()
assert (d.section[1, :, :] == dat[1, :, :]).all()
assert (d.section[0, 0, :] == dat[0, 0, :]).all()
assert (d.section[0, 1, :] == dat[0, 1, :]).all()
assert (d.section[0, 2, :] == dat[0, 2, :]).all()
assert (d.section[1, 0, :] == dat[1, 0, :]).all()
assert (d.section[1, 1, :] == dat[1, 1, :]).all()
assert (d.section[1, 2, :] == dat[1, 2, :]).all()
assert (d.section[0, 0, 0] == dat[0, 0, 0]).all()
assert (d.section[0, 0, 1] == dat[0, 0, 1]).all()
assert (d.section[0, 0, 2] == dat[0, 0, 2]).all()
assert (d.section[0, 1, 0] == dat[0, 1, 0]).all()
assert (d.section[0, 1, 1] == dat[0, 1, 1]).all()
assert (d.section[0, 1, 2] == dat[0, 1, 2]).all()
assert (d.section[0, 2, 0] == dat[0, 2, 0]).all()
assert (d.section[0, 2, 1] == dat[0, 2, 1]).all()
assert (d.section[0, 2, 2] == dat[0, 2, 2]).all()
assert (d.section[1, 0, 0] == dat[1, 0, 0]).all()
assert (d.section[1, 0, 1] == dat[1, 0, 1]).all()
assert (d.section[1, 0, 2] == dat[1, 0, 2]).all()
assert (d.section[1, 1, 0] == dat[1, 1, 0]).all()
assert (d.section[1, 1, 1] == dat[1, 1, 1]).all()
assert (d.section[1, 1, 2] == dat[1, 1, 2]).all()
assert (d.section[1, 2, 0] == dat[1, 2, 0]).all()
assert (d.section[1, 2, 1] == dat[1, 2, 1]).all()
assert (d.section[1, 2, 2] == dat[1, 2, 2]).all()
assert (d.section[:, 0, 0] == dat[:, 0, 0]).all()
assert (d.section[:, 0, 1] == dat[:, 0, 1]).all()
assert (d.section[:, 0, 2] == dat[:, 0, 2]).all()
assert (d.section[:, 1, 0] == dat[:, 1, 0]).all()
assert (d.section[:, 1, 1] == dat[:, 1, 1]).all()
assert (d.section[:, 1, 2] == dat[:, 1, 2]).all()
assert (d.section[:, 2, 0] == dat[:, 2, 0]).all()
assert (d.section[:, 2, 1] == dat[:, 2, 1]).all()
assert (d.section[:, 2, 2] == dat[:, 2, 2]).all()
assert (d.section[0, :, 0] == dat[0, :, 0]).all()
assert (d.section[0, :, 1] == dat[0, :, 1]).all()
assert (d.section[0, :, 2] == dat[0, :, 2]).all()
assert (d.section[1, :, 0] == dat[1, :, 0]).all()
assert (d.section[1, :, 1] == dat[1, :, 1]).all()
assert (d.section[1, :, 2] == dat[1, :, 2]).all()
assert (d.section[:, :, 0] == dat[:, :, 0]).all()
assert (d.section[:, :, 1] == dat[:, :, 1]).all()
assert (d.section[:, :, 2] == dat[:, :, 2]).all()
assert (d.section[:, 0, :] == dat[:, 0, :]).all()
assert (d.section[:, 1, :] == dat[:, 1, :]).all()
assert (d.section[:, 2, :] == dat[:, 2, :]).all()
assert (d.section[:, :, 0:1] == dat[:, :, 0:1]).all()
assert (d.section[:, :, 0:2] == dat[:, :, 0:2]).all()
assert (d.section[:, :, 0:3] == dat[:, :, 0:3]).all()
assert (d.section[:, :, 1:2] == dat[:, :, 1:2]).all()
assert (d.section[:, :, 1:3] == dat[:, :, 1:3]).all()
assert (d.section[:, :, 2:3] == dat[:, :, 2:3]).all()
assert (d.section[0:1, 0:1, 0:1] == dat[0:1, 0:1, 0:1]).all()
assert (d.section[0:1, 0:1, 0:2] == dat[0:1, 0:1, 0:2]).all()
assert (d.section[0:1, 0:1, 0:3] == dat[0:1, 0:1, 0:3]).all()
assert (d.section[0:1, 0:1, 1:2] == dat[0:1, 0:1, 1:2]).all()
assert (d.section[0:1, 0:1, 1:3] == dat[0:1, 0:1, 1:3]).all()
assert (d.section[0:1, 0:1, 2:3] == dat[0:1, 0:1, 2:3]).all()
assert (d.section[0:1, 0:2, 0:1] == dat[0:1, 0:2, 0:1]).all()
assert (d.section[0:1, 0:2, 0:2] == dat[0:1, 0:2, 0:2]).all()
assert (d.section[0:1, 0:2, 0:3] == dat[0:1, 0:2, 0:3]).all()
assert (d.section[0:1, 0:2, 1:2] == dat[0:1, 0:2, 1:2]).all()
assert (d.section[0:1, 0:2, 1:3] == dat[0:1, 0:2, 1:3]).all()
assert (d.section[0:1, 0:2, 2:3] == dat[0:1, 0:2, 2:3]).all()
assert (d.section[0:1, 0:3, 0:1] == dat[0:1, 0:3, 0:1]).all()
assert (d.section[0:1, 0:3, 0:2] == dat[0:1, 0:3, 0:2]).all()
assert (d.section[0:1, 0:3, 0:3] == dat[0:1, 0:3, 0:3]).all()
assert (d.section[0:1, 0:3, 1:2] == dat[0:1, 0:3, 1:2]).all()
assert (d.section[0:1, 0:3, 1:3] == dat[0:1, 0:3, 1:3]).all()
assert (d.section[0:1, 0:3, 2:3] == dat[0:1, 0:3, 2:3]).all()
assert (d.section[0:1, 1:2, 0:1] == dat[0:1, 1:2, 0:1]).all()
assert (d.section[0:1, 1:2, 0:2] == dat[0:1, 1:2, 0:2]).all()
assert (d.section[0:1, 1:2, 0:3] == dat[0:1, 1:2, 0:3]).all()
assert (d.section[0:1, 1:2, 1:2] == dat[0:1, 1:2, 1:2]).all()
assert (d.section[0:1, 1:2, 1:3] == dat[0:1, 1:2, 1:3]).all()
assert (d.section[0:1, 1:2, 2:3] == dat[0:1, 1:2, 2:3]).all()
assert (d.section[0:1, 1:3, 0:1] == dat[0:1, 1:3, 0:1]).all()
assert (d.section[0:1, 1:3, 0:2] == dat[0:1, 1:3, 0:2]).all()
assert (d.section[0:1, 1:3, 0:3] == dat[0:1, 1:3, 0:3]).all()
assert (d.section[0:1, 1:3, 1:2] == dat[0:1, 1:3, 1:2]).all()
assert (d.section[0:1, 1:3, 1:3] == dat[0:1, 1:3, 1:3]).all()
assert (d.section[0:1, 1:3, 2:3] == dat[0:1, 1:3, 2:3]).all()
assert (d.section[1:2, 0:1, 0:1] == dat[1:2, 0:1, 0:1]).all()
assert (d.section[1:2, 0:1, 0:2] == dat[1:2, 0:1, 0:2]).all()
assert (d.section[1:2, 0:1, 0:3] == dat[1:2, 0:1, 0:3]).all()
assert (d.section[1:2, 0:1, 1:2] == dat[1:2, 0:1, 1:2]).all()
assert (d.section[1:2, 0:1, 1:3] == dat[1:2, 0:1, 1:3]).all()
assert (d.section[1:2, 0:1, 2:3] == dat[1:2, 0:1, 2:3]).all()
assert (d.section[1:2, 0:2, 0:1] == dat[1:2, 0:2, 0:1]).all()
assert (d.section[1:2, 0:2, 0:2] == dat[1:2, 0:2, 0:2]).all()
assert (d.section[1:2, 0:2, 0:3] == dat[1:2, 0:2, 0:3]).all()
assert (d.section[1:2, 0:2, 1:2] == dat[1:2, 0:2, 1:2]).all()
assert (d.section[1:2, 0:2, 1:3] == dat[1:2, 0:2, 1:3]).all()
assert (d.section[1:2, 0:2, 2:3] == dat[1:2, 0:2, 2:3]).all()
assert (d.section[1:2, 0:3, 0:1] == dat[1:2, 0:3, 0:1]).all()
assert (d.section[1:2, 0:3, 0:2] == dat[1:2, 0:3, 0:2]).all()
assert (d.section[1:2, 0:3, 0:3] == dat[1:2, 0:3, 0:3]).all()
assert (d.section[1:2, 0:3, 1:2] == dat[1:2, 0:3, 1:2]).all()
assert (d.section[1:2, 0:3, 1:3] == dat[1:2, 0:3, 1:3]).all()
assert (d.section[1:2, 0:3, 2:3] == dat[1:2, 0:3, 2:3]).all()
assert (d.section[1:2, 1:2, 0:1] == dat[1:2, 1:2, 0:1]).all()
assert (d.section[1:2, 1:2, 0:2] == dat[1:2, 1:2, 0:2]).all()
assert (d.section[1:2, 1:2, 0:3] == dat[1:2, 1:2, 0:3]).all()
assert (d.section[1:2, 1:2, 1:2] == dat[1:2, 1:2, 1:2]).all()
assert (d.section[1:2, 1:2, 1:3] == dat[1:2, 1:2, 1:3]).all()
assert (d.section[1:2, 1:2, 2:3] == dat[1:2, 1:2, 2:3]).all()
assert (d.section[1:2, 1:3, 0:1] == dat[1:2, 1:3, 0:1]).all()
assert (d.section[1:2, 1:3, 0:2] == dat[1:2, 1:3, 0:2]).all()
assert (d.section[1:2, 1:3, 0:3] == dat[1:2, 1:3, 0:3]).all()
assert (d.section[1:2, 1:3, 1:2] == dat[1:2, 1:3, 1:2]).all()
assert (d.section[1:2, 1:3, 1:3] == dat[1:2, 1:3, 1:3]).all()
assert (d.section[1:2, 1:3, 2:3] == dat[1:2, 1:3, 2:3]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
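        # With do_not_scale_image_data=True the raw on-disk integers are
        # returned; by default BSCALE/BZERO are applied and the data is float32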
with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype('>i2')
with fits.open(self.data('scale.fits')) as hdul:
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp('test_new.fits'), data=np.array([],
dtype='uint8'))
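        # Append uint16 data; BZERO/BSCALE must be written in the correct
        # location so the data reads back as uint16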
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
with fits.open(self.temp('test_new.fits'), uint=True) as f:
assert f[1].data.dtype == 'uint16'
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(np.random.rand(100,100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type='uint8', bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
        the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
if int_size == 64:
                max_uint = np.uint64(max_uint)
dtype = 'uint{}'.format(int_size)
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == 'uint{}'.format(int_size)
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
filename = 'uint{}.fits'.format(int_size)
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == 'uint{}'.format(int_size)
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
@pytest.mark.parametrize(('from_file'), (False, True))
@pytest.mark.parametrize(('do_not_scale'), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(self,
from_file,
do_not_scale):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype='uint16')
if from_file:
            # To generate the proper input file we always want to scale the
            # data before writing it...otherwise when we open it, it will be
            # regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = 'unsigned_int.fits'
tmp_uint.writeto(self.temp(filename))
with fits.open(self.temp(filename),
do_not_scale_image_data=do_not_scale) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr,
do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert 'BSCALE' in uint_hdu.header
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BSCALE'] == 1
assert uint_hdu.header['BZERO'] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header['BITPIX'] < 0
# BSCALE and BZERO should NOT be in header any more.
assert 'BSCALE' not in uint_hdu.header
assert 'BZERO' not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = 'test_uint_to_float.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
        NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with catch_warnings() as w:
hdu.writeto(self.temp('test_new.fits'))
# Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not
# int
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with catch_warnings() as w:
with fits.open(self.temp('test_new.fits')) as h:
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
assert np.all(arr == h[0].data)
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
        the blanks are replaced by NaN in the float data), the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp('test.fits')
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(fits.verify.VerifyWarning,
match="Invalid 'BLANK' keyword in header"):
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with catch_warnings() as w:
with fits.open(self.temp('test2.fits')) as hdul2:
assert len(w) == 0
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
assert np.isnan(data[0])
with fits.open(filename,
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
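        # BZERO is applied on read, so the stored values of -1 should come
        # back as 0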
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
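        # Accessing the data scales it to float; since the file is open in
        # update mode, the rescaled data will be written back on close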
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
        # Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
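            # The physical value 0 assigned above should have been scaled back
            # to this raw integer value on disk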
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
with pytest.warns(fits.verify.VerifyWarning, match="Invalid value for "
"'BLANK' keyword in header: 'nan'"):
hdu.writeto(self.temp('test.fits'))
with catch_warnings() as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
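        # HDUList.fromstring should still apply the BSCALE factor when the
        # data is accessed from the in-memory buffer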
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level'),
[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
(np.zeros((10, 10)), 'PLIO_1', 16)])
@pytest.mark.parametrize('byte_order', ['<', '>'])
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
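        # Each supported compression type should round-trip the data and
        # preserve the basic image keywords, regardless of input byte order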
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compression_type=compression_type,
quantize_level=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), overwrite=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@pytest.mark.skipif('not HAS_SCIPY')
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import scipy.misc
np.random.seed(42)
data = scipy.misc.ascent() + np.random.randn(512, 512)*10
fits.ImageHDU(data).writeto(self.temp('im1.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-1, dither_seed=5)\
.writeto(self.temp('im2.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-100, dither_seed=5)\
.writeto(self.temp('im3.fits'))
im1 = fits.getdata(self.temp('im1.fits'))
im2 = fits.getdata(self.temp('im2.fits'))
im3 = fits.getdata(self.temp('im3.fits'))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(data=cube, name='SCI',
compression_type='HCOMPRESS_1',
quantize_level=16, tile_size=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
        Ensure that when floating point data is compressed with the
        SUBTRACTIVE_DITHER_1 quantization method, the correct ZDITHER0 seed
        is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
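        # Expected ZDITHER0 seed derived from the first tile: byte-wise sum of
        # the first row, modulo 10000, plus 1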
csum = (array[0].view('uint8').sum() % 10000) + 1
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with catch_warnings():
# No warnings should be displayed in this case
warnings.simplefilter('error')
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
        # Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
noise = np.random.normal(size=(1000, 1000))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression and make sure it
        # wasn't compressed perfectly. This shouldn't ever happen, but just to
        # make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1',
quantize_level=0.0) # No quantization
with ignore_warnings():
chdu2.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type='RICE_1',
tile_size=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1'])
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2'])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with catch_warnings() as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
"Keyword {!r} is reserved".format(keyword))
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
def test_compression_header_append2(self):
"""
        Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data('comp.fits')) as hdul:
header = hdul[1].header
while (len(header) < 1000):
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with catch_warnings() as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
            # Now try a keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE,BZERO is set to floating point values, the image
should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
def test_comphdu_bscale(tmpdir):
"""
    Regression test for a bug that caused extensions using BZERO and BSCALE
    that were converted to CompImageHDU to end up with BZERO/BSCALE placed
    before TFIELDS.
"""
filename1 = tmpdir.join('3hdus.fits').strpath
filename2 = tmpdir.join('3hdus_comp.fits').strpath
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify('exception')
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
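# Illustrative sketch (not part of the original test suite): the call pattern
# the regression test above protects, i.e. passing a floating-point bzero to
# ImageHDU.scale().  The array contents are arbitrary example values.
def _example_scale_with_float_bzero():
    import numpy as np
    from astropy.io import fits

    hdu = fits.ImageHDU(np.arange(10, dtype=np.int32))
    # A float bzero is accepted; previously this raised a TypeError because
    # of the stricter in-place casting rules in Numpy.
    hdu.scale('int16', bzero=1.3)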
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = os.path.join(os.path.dirname(__file__),
'data', 'compressed_float_bzero.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmpdir):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmpdir.join('floatimg_with_bzero.fits').strpath
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmpdir):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmpdir.join('test.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmpdir.join('test2.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
|
82a4c1c9d0705e941010f61828a77949e0b5254b8d1551d1abcfe0ac40ac87b3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import functools
from io import BytesIO
from textwrap import dedent
import pytest
import numpy as np
from numpy import ma
from astropy.table import Table, MaskedColumn
from astropy.io import ascii
from astropy.io.ascii.core import ParameterError, FastOptionsError, InconsistentTableError
from astropy.io.ascii.cparser import CParserError
from astropy.io.ascii.fastbasic import (
FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader)
from .common import assert_equal, assert_almost_equal, assert_true
StringIO = lambda x: BytesIO(x.encode('ascii'))
TRAVIS = os.environ.get('TRAVIS', False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.e-15, atol=1.e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i]))
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += '\n'
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if TRAVIS:
pytest.xfail("Multiprocessing can sometimes fail on Travis CI")
t6 = ascii.read(table, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = str(tmpdir.join('table{0}.txt'.format(_filename_counter)))
_filename_counter += 1
with open(filename, 'wb') as f:
f.write(table.encode('ascii'))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(filename, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
@pytest.fixture(scope='function')
def read_basic(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')
@pytest.fixture(scope='function')
def read_csv(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')
@pytest.fixture(scope='function')
def read_tab(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')
@pytest.fixture(scope='function')
def read_commented_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,
format='commented_header')
@pytest.fixture(scope='function')
def read_rdb(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')
@pytest.fixture(scope='function')
def read_no_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastNoHeader,
format='no_header')
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
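# Illustrative sketch (not part of the original test suite): the plain
# ascii.read() call that the read_basic fixture wraps, with the C reader
# selected explicitly.  The table text is an arbitrary example.
def _example_fast_basic_read():
    from astropy.io import ascii

    t = ascii.read("A B C\n1 2 3\n4 5 6", format='basic', guess=False,
                   fast_reader=True)
    assert t.colnames == ['A', 'B', 'C']
    assert len(t) == 2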
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3'))
assert_table_equal(t2, expected)
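# Illustrative sketch (not part of the original test suite): reading
# header-less data with the no_header format; column names are
# auto-generated as col1, col2, ... unless names= is supplied.
def _example_fast_no_header_read():
    from astropy.io import ascii

    t = ascii.read("1 2 3\n4 5 6", format='no_header', guess=False,
                   fast_reader=True)
    assert t.colnames == ['col1', 'col2', 'col3']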
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header("A B C\n1 2 3\n4 5 6",
names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
""" + ' \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, it should fall back
to strings.
"""
text = """
A B C D E
1 a 3 4 5
2. 1 9 10 -5.3e4
4 2 -12 .4 six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = """
COL1 COL2 COL3
1 A -1
2 B -2
"""
expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))
for sep in ' ,\t#;':
table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel)
expected = Table([[2, 6], [3, 7]], names=('B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = """
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
"""
table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],
exclude_names=['B', 'F'], parallel=parallel)
expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))
assert_table_equal(table, expected)
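# Illustrative sketch (not part of the original test suite): include_names
# selects a subset of columns first, then exclude_names removes columns from
# that subset, as the test above verifies.  The column names are arbitrary.
def _example_include_exclude_names():
    from astropy.io import ascii

    t = ascii.read("A B C D\n1 2 3 4", format='basic', guess=False,
                   include_names=['A', 'B', 'D'], exclude_names=['B'])
    assert t.colnames == ['A', 'D']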
def test_doubled_quotes(read_csv):
"""
Test for #8283 (fix for #8281): parsing doubled quotes ("ab""cd") inside a
quoted field was previously handled incorrectly.
"""
tbl = '\n'.join(['a,b',
'"d""","d""q"',
'"""q",""""'])
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = """\
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
"""
ascii.read(tbl, format='csv', fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The quotechar character (default '"') should denote the start of a quoted
field, which can contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
"A B" C D
1.5 2.1 -37.1
a b " c
d"
"""
table = read_basic(text, parallel=parallel)
expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D'))
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("key,val", [
('delimiter', ',,'), # multi-char delimiter
('comment', '##'), # multi-char comment
('data_start', None), # data_start=None
('data_start', -1), # data_start negative
('quotechar', '##'), # multi-char quote signifier
('header_start', -1), # negative header_start
('converters', dict((i + 1, ascii.convert_numpy(np.uint)) for i in range(3))), # passing converters
('Inputter', ascii.ContinuationLinesInputter), # passing Inputter
('header_Splitter', ascii.DefaultSplitter), # passing Splitter
('data_Splitter', ascii.DefaultSplitter)])
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read('1 2 3\n4 5 6')
with pytest.raises(ParameterError):
ascii.read('1 2 3\n4 5 6',
format='fast_basic', guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6')
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
"""
with pytest.raises(InconsistentTableError) as e:
table = FastBasic().read(text)
assert 'InconsistentTableError: Number of header columns (3) ' \
'inconsistent with data columns in data line 2' in str(e)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
table = FastCsv().read(text)
assert 'InconsistentTableError: Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
table = FastCsv().read(text)
assert 'InconsistentTableError: Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table['B'][1] is not ma.masked
assert table['C'][1] is ma.masked
with pytest.raises(InconsistentTableError) as e:
table = FastBasic(delimiter=',').read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent("""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
""")
expected = Table({'A': [np.nan, np.nan, np.nan,
np.inf, np.inf, np.inf, np.inf,
-np.inf, -np.inf]})
table = read_basic(text, parallel=parallel)
assert table['A'].dtype.kind == 'f'
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=',', parallel=parallel)
# The empty value in column A should become a masked '0'
assert isinstance(table['A'], MaskedColumn)
assert table['A'][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table['A'].data.data[0], '0')
assert table['A'][1] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)
assert isinstance(table['B'], MaskedColumn)
assert table['A'][0] is not ma.masked # empty value unaffected
assert table['C'][2] is not ma.masked # -9999 is not an exact match
assert table['B'][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table['B'].data.data[1], 0.0)
assert table['B'][0] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in 'ABC':
assert not isinstance(table[name], MaskedColumn)
table = read_basic(text, delimiter=',', fill_values=[('', '0', 'A'),
('nan', '999', 'A', 'C')], parallel=parallel)
assert np.isnan(table['B'][3]) # nan filling skips column B
assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan
assert table['A'][0] is ma.masked
assert table['A'][2] is ma.masked
assert_equal(table['A'].data.data[0], '0')
assert_equal(table['A'].data.data[2], '999')
assert table['C'][0] is ma.masked
assert_almost_equal(table['C'].data.data[0], 999.0)
assert_almost_equal(table['C'][1], -3.4) # column is still of type float
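# Illustrative sketch (not part of the original test suite): restricting a
# fill_values replacement to a particular column by listing the column name
# after the (match, replacement) pair.  The data values are arbitrary.
def _example_fill_values_per_column():
    from numpy import ma
    from astropy.io import ascii

    t = ascii.read("A,B\n-999,-999\n1,2", format='csv', guess=False,
                   fill_values=[('-999', '0', 'A')])
    assert t['A'][0] is ma.masked   # -999 masked in column A only
    assert t['B'][0] == -999        # column B left untouched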
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is ma.masked
assert table['C'][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)
assert table['C'][2] is ma.masked
assert table['A'][0] is not ma.masked
assert table['B'][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names
assert table['C'][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = 'A B C\n'
for i in range(500): # create 500 rows
text += ' '.join([str(i) for i in range(3)])
text += '\n'
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = ' '.join([str(i) for i in range(500)])
text += ('\n' + text + '\n' + text)
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = 'a b c\n1 2 3\n4 5 6'
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format='fast_basic', guess=False, comment='##')
# Enable multiprocessing and the fast converter
try:
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': True, 'use_fast_converter': True})
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == 'nt':
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': False,
'use_fast_converter': True})
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
# Use the slow reader instead
ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format='basic', guess=False, comment='##')
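# Illustrative sketch (not part of the original test suite): fast_reader also
# accepts a dict of C-reader options; unknown keys raise FastOptionsError
# instead of silently falling back to the pure-Python reader.
def _example_fast_reader_options():
    from astropy.io import ascii

    t = ascii.read('a b c\n1 2 3', format='basic', guess=False,
                   fast_reader={'use_fast_converter': True})
    assert len(t) == 1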
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table['1'][0], ' a') # preserve line whitespace
assert_equal(table['2'][0], ' b ') # preserve field whitespace
assert table['3'][0] is ma.masked # empty value should be masked
assert_equal(table['2'][1], ' d e') # preserve whitespace in quoted fields
assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
begin right after the header.
"""
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(t1, expected)
text = '# first commented line\n # second commented line\n\n' + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) # negative indexing allowed
assert_table_equal(t3, expected)
text += '7 8 9'
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) # data_start cannot be negative
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
assert_equal(table['A'].dtype.kind, 'i')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'f')
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert 'Column C failed to convert' in str(e)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified
read_rdb(text, parallel=parallel)
assert 'mismatch between number of column names and column types' in str(e)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C
read_rdb(text, parallel=parallel)
assert 'type definitions do not all match [num](N|S)' in str(e)
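# Illustrative sketch (not part of the original test suite): an RDB table has
# a second header line of type codes (N for numeric, S for string) which the
# reader uses instead of guessing column types.  The data are arbitrary.
def _example_fast_rdb_read():
    from astropy.io import ascii

    t = ascii.read('A\tB\nN\tS\n1\tx\n2\ty', format='rdb', guess=False,
                   fast_reader=True)
    assert t['A'].dtype.kind == 'i'
    assert t['B'].dtype.kind in ('S', 'U')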
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
\t1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table['c'][0] is ma.masked # empty value masked by default
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = 'a,b,c\n#1,2,3\n4,5,6'
table = read_csv(text, parallel=parallel)
expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = 'a\tb\tc\n # comment line\n1\t2\t3'
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = 'a b c\n1 2 \n3 4 5'
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format='fast_basic', guess=False)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e)
text = 'a b c\n 1 2 3 \t \n 4 5 6 '
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic('a b c', parallel=parallel)
expected = Table([[], [], []], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n'
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))
for newline in ('\r\n', '\r'):
table = read_basic(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = '#' + text
for newline in ('\r\n', '\r'):
table = read_commented_header(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'), masked=True)
expected['a'][0] = np.ma.masked
expected['c'][0] = np.ma.masked
text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n'
for newline in ('\r\n', '\r'):
table = read_rdb(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta['comments'],
['header comment', 'comment 2', 'comment 3'])
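# Illustrative sketch (not part of the original test suite): comment lines
# collected by the reader end up, stripped of the leading '#', in the
# table's meta['comments'] list.
def _example_comments_in_meta():
    from astropy.io import ascii

    t = ascii.read('# first comment\na b\n1 2', format='basic', guess=False,
                   fast_reader=True)
    assert t.meta['comments'] == ['first comment']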
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=('a', 'b'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t"""
head = ['A{0}'.format(i) for i in range(28)]
table = read_tab(content, data_start=1,
parallel=parallel, names=head)
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table(tmpdir):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3 GB) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running
the test requires quite a lot of memory (~18 GB when reading the file)!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = str(tmpdir.join("big_table.csv"))
print("Creating a {} rows table ({} columns).".format(NB_ROWS, NB_COLS))
data = np.random.random(NB_ROWS)
t = Table(data=[data]*NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print("Saving the table to {}".format(filename))
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {}"
" + 1 (header).".format(NB_ROWS))
assert sum(1 for line in open(filename)) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table2(tmpdir):
"""Test reading of a file with a huge column.
"""
# (2**32 // 2) : max value for a signed 32-bit int
# // 10 : each row value has 10 characters (1e9)
# + 5 : add a few extra rows so the total length cannot be stored in a 32-bit int
NB_ROWS = (2**32 // 2) // 10 + 5
filename = str(tmpdir.join("big_table.csv"))
print("Creating a {} rows table.".format(NB_ROWS))
data = np.full(2**32 // 2 // 10 + 5, int(1e9), dtype=np.int32)
t = Table(data=[data], names=['a'], copy=False)
print("Saving the table to {}".format(filename))
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {}"
" + 1 (header).".format(NB_ROWS))
assert sum(1 for line in open(filename)) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif TRAVIS:
pytest.xfail("Multiprocessing can sometimes fail on Travis CI")
fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test some additional corner cases
fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305',
'0.2e-323', '5200e-327', ' 0.0000000000000000000001024E+330']
values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get('use_fast_converter'):
fast_reader.update({'exponent_style': 'A'})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305',
'0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330']
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
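# Illustrative sketch (not part of the original test suite): on a 64-bit
# build, exponents that overflow or underflow float64 saturate to +/-inf and
# 0.0 rather than raising, as the parametrized test above checks in detail.
def _example_out_of_range_floats():
    import numpy as np
    from astropy.io import ascii

    t = ascii.read(['1e400 1e-400'], format='no_header', guess=False)
    assert np.isinf(t['col1'][0])
    assert t['col2'][0] == 0.0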
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min+1
imax = np.iinfo(int).max-1
huge = '{:d}'.format(imax+2)
text = 'P M S\n {:d} {:d} {:s}'.format(imax, imin, huge)
expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
assert_table_equal(table, expected)
# check with leading zeroes to make sure strtol does not read them as octal
text = 'P M S\n000{:d} -0{:d} 00{:s}'.format(imax, -imin, huge)
expected = Table([[imax], [imin], ['00'+huge]], names=('P', 'M', 'S'))
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
assert_table_equal(table, expected)
# Mixed columns should be returned as float, but if the out-of-range integer
# shows up first, it will produce a string column - with both readers
pytest.xfail("Integer fallback depends on order of rows")
text = 'A B\n 12.3 {0:d}9\n {0:d}9 45.6e7'.format(imax)
expected = Table([[12.3, 10.*imax], [10.*imax, 4.56e8]],
names=('A', 'B'))
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
assert_table_equal(table, expected)
table = ascii.read(text, format='basic', guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# check for nominal np.float64 precision
rtol = 1.e-15
atol = 0.0
text = 'A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n' + \
' 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309'
expc = Table([[1.0001e101, 0.42], [2, 0.5], [2.e-103, 6.e3], [3, 1.7e307]],
names=('A', 'B', 'C', 'D'))
expstyles = {'e': 6*('E'),
'D': ('D', 'd', 'd', 'D', 'd', 'D'),
'Q': 3*('q', 'Q'),
'Fortran': ('E', '0', 'D', 'Q', 'd', '0')}
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(text.format(*(6*('D'))), format='basic', guess=guess,
fast_reader={'use_fast_converter': False,
'parallel': parallel, 'exponent_style': 'D'})
assert 'fast_reader: exponent_style requires use_fast_converter' in str(e)
# Enable multiprocessing and the fast converter; iterate over
# all exponent-style combinations, including auto-detection
for s, c in expstyles.items():
table = ascii.read(text.format(*c), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': s})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = 'A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n ' + \
'0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330'
table = ascii.read(text, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
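# Illustrative sketch (not part of the original test suite): Fortran-style
# 'D' exponents are handled by the fast converter, selected through the
# exponent_style option.  The column values are arbitrary examples.
def _example_fortran_exponents():
    from astropy.io import ascii

    t = ascii.read(['a b', '1.23D4 5.67D-8'], format='basic', guess=False,
                   fast_reader={'exponent_style': 'D'})
    assert t['a'].dtype.kind == 'f'
    assert t['b'].dtype.kind == 'f'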
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and TRAVIS:
pytest.xfail("Multiprocessing can sometimes fail on Travis CI")
formats = {'basic': ' ', 'tab': '\t', 'csv': ','}
header = ['S1', 'F2', 'S2', 'F3', 'S3', 'F4', 'F5', 'S4', 'I1', 'F6', 'F7']
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)),
format=f, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats['bar'] = '|'
else:
formats = {'basic': ' '}
for s in formats.values():
t2 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'a'})
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': True})
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'D'})
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': False})
read_values = [col[0] for col in t5.itercols()]
if os.name == 'nt':
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check that readers without a fast option raise an error when a
fast_reader is explicitly requested (with the default 'guess=True' implied).
"""
tabstr = dedent("""
a b
1 1.23D4
2 5.67D-8
""")[1:-1]
t1 = ascii.read(tabstr.split('\n'), fast_reader=dict(exponent_style='D'))
assert t1['b'].dtype.kind == 'f'
tabrdb = dedent("""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
""")[1:-1]
t2 = ascii.read(tabrdb.split('\n'), format='rdb',
fast_reader=dict(exponent_style='fortran'))
assert t2['b'].dtype.kind == 'f'
tabrst = dedent("""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
""")[1:-1]
t3 = ascii.read(tabrst.split('\n'), format='rst')
assert t3['b'].dtype.kind == 'f'
t4 = ascii.read(tabrst.split('\n'), guess=True)
assert t4['b'].dtype.kind == 'f'
# In the special case of fast_reader=True (the default),
# the incompatibility is ignored
t5 = ascii.read(tabrst.split('\n'), format='rst', fast_reader=True)
assert t5['b'].dtype.kind == 'f'
with pytest.raises(ParameterError):
t6 = ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader='force')
with pytest.raises(ParameterError):
t7 = ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(use_fast_converter=False))
tabrst = tabrst.replace('E', 'D')
with pytest.raises(ParameterError):
t8 = ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(exponent_style='D'))
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize('fast_reader', [dict(exponent_style='D'),
dict(exponent_style='A')])
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get('exponent_style', 'E')
fields = ['10.1D+199', '3.14d+313', '2048d+306', '0.6D-325', '-2.d345']
t = ascii.read(StringIO(' '.join(fields)), guess=guess,
fast_reader=fast_reader)
assert fast_reader.get('exponent_style', None) == expstyle
@pytest.mark.parametrize('fast_reader', [False,
dict(parallel=True),
dict(parallel=False)])
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta['comments'] == ['comment 1', 'comment 2']
assert len(t) == 0
assert t.colnames == ['col1', 'col2']
|
bdab091907bafbe583fa3959c9d23777743dd87fe48aaae5cd3c39b930c9991d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.tests.helper import catch_warnings
from astropy.io.votable import converters
from astropy.io.votable import exceptions
from astropy.io.votable import tree
def test_reraise():
def fail():
raise RuntimeError("This failed")
try:
try:
fail()
except RuntimeError as e:
exceptions.vo_reraise(e, additional="From here")
except RuntimeError as e:
assert "From here" in str(e)
else:
assert False
def test_parse_vowarning():
config = {'verify': 'exception',
'filename': 'foo.xml'}
pos = (42, 64)
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config, pos=pos)
c = converters.get_converter(field, config=config, pos=pos)
parts = exceptions.parse_vowarning(str(w[0].message))
match = {
'number': 47,
'is_exception': False,
'nchar': 64,
'warning': 'W47',
'is_something': True,
'message': 'Missing arraysize indicates length 1',
'doc_url': 'io/votable/api_exceptions.html#w47',
'nline': 42,
'is_warning': True
}
assert parts == match
|
4560cb1309bac12a555c0c659aede22c44650c7579b2250ca12833a80f0bf622 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import io
import pathlib
import sys
import gzip
from unittest import mock
# THIRD-PARTY
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable import tree
from astropy.io.votable.exceptions import VOTableSpecError, VOWarning
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
from astropy.tests.helper import raises, catch_warnings
# Determine the kind of float formatting in this build of Python
if hasattr(sys, 'float_repr_style'):
legacy_float_repr = (sys.float_repr_style == 'legacy')
else:
legacy_float_repr = sys.platform.startswith('win')
def assert_validate_schema(filename, version):
if sys.platform.startswith('win'):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, 'File did not validate against VOTable schema'
def test_parse_single_table():
table = parse_single_table(get_pkg_data_filename('data/regression.xml'))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
table2 = parse_single_table(get_pkg_data_filename('data/regression.xml'),
table_number=1)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
@raises(IndexError)
def test_parse_single_table3():
parse_single_table(get_pkg_data_filename('data/regression.xml'),
table_number=3)
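# Illustrative sketch (not exercised by the regression suite): selecting one
# of several tables in a VOTable file by zero-based index via table_number,
# then converting it to an astropy Table; an out-of-range index raises
# IndexError, as the test above checks.
def _example_parse_single_table_by_index():
    from astropy.io.votable import parse_single_table
    from astropy.utils.data import get_pkg_data_filename

    table = parse_single_table(get_pkg_data_filename('data/regression.xml'),
                               table_number=0)
    return table.to_table()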
def _test_regression(tmpdir, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/regression.xml'),
_debug_python_based_parser=_python_based)
table = votable.get_first_table()
dtypes = [
((str('string test'), str('string_test')), str('|O8')),
((str('fixed string test'), str('string_test_2')), str('|S10')),
(str('unicode_test'), str('|O8')),
((str('unicode test'), str('fixed_unicode_test')), str('<U10')),
((str('string array test'), str('string_array_test')), str('|S4')),
(str('unsignedByte'), str('|u1')),
(str('short'), str('<i2')),
(str('int'), str('<i4')),
(str('long'), str('<i8')),
(str('double'), str('<f8')),
(str('float'), str('<f4')),
(str('array'), str('|O8')),
(str('bit'), str('|b1')),
(str('bitarray'), str('|b1'), (3, 2)),
(str('bitvararray'), str('|O8')),
(str('bitvararray2'), str('|O8')),
(str('floatComplex'), str('<c8')),
(str('doubleComplex'), str('<c16')),
(str('doubleComplexArray'), str('|O8')),
(str('doubleComplexArrayFixed'), str('<c16'), (2,)),
(str('boolean'), str('|b1')),
(str('booleanArray'), str('|b1'), (4,)),
(str('nulls'), str('<i4')),
(str('nulls_array'), str('<i4'), (2, 2)),
(str('precision1'), str('<f8')),
(str('precision2'), str('<f8')),
(str('doublearray'), str('|O8')),
(str('bitarray2'), str('|b1'), (16,))
]
if sys.byteorder == 'big':
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace(str('<'), str('>'))
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(str(tmpdir.join("regression.tabledata.xml")),
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.tabledata.xml")),
votable.version)
if binary_mode == 1:
votable.get_first_table().format = 'binary'
votable.version = '1.1'
elif binary_mode == 2:
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
votable.version = '1.3'
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.binary.xml")),
votable.version)
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = 'tabledata'
votable2.to_xml(str(tmpdir.join("regression.bin.tabledata.xml")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.bin.tabledata.xml")),
votable.version)
with open(
get_pkg_data_filename(
'data/regression.bin.tabledata.truth.{0}.xml'.format(
votable.version)),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
with open(str(tmpdir.join("regression.bin.tabledata.xml")),
'rt', encoding='utf-8') as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmpdir.join("regression.bin.tabledata.xml.gz")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
with gzip.GzipFile(
str(tmpdir.join("regression.bin.tabledata.xml.gz")), 'rb') as gzfd:
output = gzfd.readlines()
output = [x.decode('utf-8').rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression(tmpdir):
_test_regression(tmpdir, False)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_python_based_parser(tmpdir):
_test_regression(tmpdir, True)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_binary2(tmpdir):
_test_regression(tmpdir, False, 2)
class TestFixups:
def setup_class(self):
self.table = parse(get_pkg_data_filename('data/regression.xml')).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
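    # 'fixed string test' is the FIELD's name and 'string_test_2' its ID;
    # both keys should address the same column.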
def test_implicit_id(self):
assert_array_equal(self.array['string_test_2'],
self.array['fixed string test'])
class TestReferences:
def setup_class(self):
self.votable = parse(get_pkg_data_filename('data/regression.xml'))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == 'boolean'
assert fieldref.get_ref().datatype == 'boolean'
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == 'INPUT'
assert paramref.get_ref().datatype == 'float'
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
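# The columns= argument to parse() restricts which FIELDs are actually read;
# columns that were not requested (e.g. 'unicode_test') come back fully masked.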
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
columns = ['string_test', 'unsignedByte', 'bitarray']
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
def test_select_columns_by_name():
columns = ['string_test', 'unsignedByte', 'bitarray']
table = parse(
get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
class TestParse:
def setup_class(self):
self.votable = parse(get_pkg_data_filename('data/regression.xml'))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array['string_test'].dtype.type,
np.object_)
assert_array_equal(
self.array['string_test'],
[b'String & test', b'String & test', b'XXXX',
b'', b''])
def test_fixed_string_test(self):
assert issubclass(self.array['string_test_2'].dtype.type,
np.string_)
assert_array_equal(
self.array['string_test_2'],
[b'Fixed stri', b'0123456789', b'XXXX', b'', b''])
def test_unicode_test(self):
assert issubclass(self.array['unicode_test'].dtype.type,
np.object_)
assert_array_equal(self.array['unicode_test'],
["Ceçi n'est pas un pipe",
'வணக்கம்',
'XXXX', '', ''])
def test_fixed_unicode_test(self):
assert issubclass(self.array['fixed_unicode_test'].dtype.type,
np.unicode_)
assert_array_equal(self.array['fixed_unicode_test'],
["Ceçi n'est",
'வணக்கம்',
'0123456789', '', ''])
def test_unsignedByte(self):
assert issubclass(self.array['unsignedByte'].dtype.type,
np.uint8)
assert_array_equal(self.array['unsignedByte'],
[128, 255, 0, 255, 255])
assert not np.any(self.mask['unsignedByte'])
def test_short(self):
assert issubclass(self.array['short'].dtype.type,
np.int16)
assert_array_equal(self.array['short'],
[4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask['short'])
def test_int(self):
assert issubclass(self.array['int'].dtype.type,
np.int32)
assert_array_equal(
self.array['int'],
[268435456, 2147483647, -268435456, 268435455, 123456789])
assert_array_equal(self.mask['int'],
[False, False, False, False, True])
def test_long(self):
assert issubclass(self.array['long'].dtype.type,
np.int64)
assert_array_equal(
self.array['long'],
[922337203685477, 123456789, -1152921504606846976,
1152921504606846975, 123456789])
assert_array_equal(self.mask['long'],
[False, True, False, False, True])
def test_double(self):
assert issubclass(self.array['double'].dtype.type,
np.float64)
assert_array_equal(self.array['double'],
[8.9990234375, 0.0, np.inf, np.nan, -np.inf])
assert_array_equal(self.mask['double'],
[False, False, False, True, False])
def test_float(self):
assert issubclass(self.array['float'].dtype.type,
np.float32)
assert_array_equal(self.array['float'],
[1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask['float'],
[False, False, False, False, True])
def test_array(self):
assert issubclass(self.array['array'].dtype.type,
np.object_)
match = [[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]]]
for a, b in zip(self.array['array'], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data['array'][3].mask[0][0]
assert self.array.data['array'][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array['bit'].dtype.type,
np.bool_)
assert_array_equal(self.array['bit'],
[True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array['bitarray'].dtype.type,
np.bool_)
assert self.array['bitarray'].shape == (5, 3, 2)
assert_array_equal(self.array['bitarray'],
[[[True, False],
[True, True],
[False, True]],
[[False, True],
[False, False],
[True, True]],
[[True, True],
[True, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]]])
def test_bitarray_mask(self):
assert_array_equal(self.mask['bitarray'],
[[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[True, True],
[True, True],
[True, True]],
[[True, True],
[True, True],
[True, True]]])
def test_bitvararray(self):
assert issubclass(self.array['bitvararray'].dtype.type,
np.object_)
match = [[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[], []]
for a, b in zip(self.array['bitvararray'], match):
assert_array_equal(a, b)
match_mask = [[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False, False]
for a, b in zip(self.array['bitvararray'], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array['bitvararray2'].dtype.type,
np.object_)
match = [[],
[[[False, True],
[False, False],
[True, False]],
[[True, False],
[True, False],
[True, False]]],
[[[True, True],
[True, True],
[True, True]]],
[],
[]]
for a, b in zip(self.array['bitvararray2'], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array['floatComplex'].dtype.type,
np.complex64)
assert_array_equal(self.array['floatComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])
assert_array_equal(self.mask['floatComplex'],
[True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array['doubleComplex'].dtype.type,
np.complex128)
assert_array_equal(
self.array['doubleComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])
assert_array_equal(self.mask['doubleComplex'],
[True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array['doubleComplexArray'].dtype.type,
np.object_)
assert ([len(x) for x in self.array['doubleComplexArray']] ==
[0, 2, 2, 0, 0])
def test_boolean(self):
assert issubclass(self.array['boolean'].dtype.type,
np.bool_)
assert_array_equal(self.array['boolean'],
[True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask['boolean'],
[False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array['booleanArray'].dtype.type,
np.bool_)
assert_array_equal(self.array['booleanArray'],
[[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False]])
def test_boolean_array_mask(self):
assert_array_equal(self.mask['booleanArray'],
[[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True]])
def test_nulls(self):
assert_array_equal(self.array['nulls'],
[0, -9, 2, -9, -9])
assert_array_equal(self.mask['nulls'],
[False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(self.array['nulls_array'],
[[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]]])
assert_array_equal(self.mask['nulls_array'],
[[[True, True],
[True, True]],
[[False, False],
[False, False]],
[[True, False],
[True, False]],
[[False, True],
[False, True]],
[[True, True],
[True, True]]])
def test_double_array(self):
assert issubclass(self.array['doublearray'].dtype.type,
np.object_)
assert len(self.array['doublearray'][0]) == 0
assert_array_equal(self.array['doublearray'][1],
[0, 1, np.inf, -np.inf, np.nan, 0, -1])
assert_array_equal(self.array.data['doublearray'][1].mask,
[False, False, False, False, False, False, True])
def test_bit_array2(self):
assert_array_equal(self.array['bitarray2'][0],
[True, True, True, True,
False, False, False, False,
True, True, True, True,
False, False, False, False])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'][0])
assert np.all(self.mask['bitarray2'][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id('J2000')
assert coosys.system == 'eq_FK5'
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id('QUERY_STATUS')
assert info.value == 'OK'
if self.votable.version != '1.1':
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..." # noqa
def test_repr(self):
assert '3 tables' in repr(self.votable)
assert repr(list(self.votable.iter_fields_and_params())[0]) == \
'<PARAM ID="awesome" arraysize="*" datatype="float" name="INPUT" unit="deg" value="[0.0 0.0]"/>' # noqa
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == '[</>]'
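# Re-run the TestParse checks on a table round-tripped in memory through the
# (default) TABLEDATA serialization.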
class TestThroughTableData(TestParse):
def setup_class(self):
votable = parse(get_pkg_data_filename('data/regression.xml'))
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
def test_schema(self, tmpdir):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = str(tmpdir.join("test_through_tabledata.xml"))
with open(fn, 'wb') as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, '1.1')
class TestThroughBinary(TestParse):
def setup_class(self):
votable = parse(get_pkg_data_filename('data/regression.xml'))
votable.get_first_table().format = 'binary'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask['bit'])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
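# Same round-trip checks, but through the BINARY2 serialization, which
# requires VOTable 1.3.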
class TestThroughBinary2(TestParse):
def setup_class(self):
votable = parse(get_pkg_data_filename('data/regression.xml'))
votable.version = '1.3'
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
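# Smoke-test that every bundled XML data file parses; custom_datatype.xml is
# skipped because it needs the datatype_mapping argument (see
# test_custom_datatype below).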
def test_open_files():
for filename in get_pkg_data_filenames('data', pattern='*.xml'):
if filename.endswith('custom_datatype.xml'):
continue
parse(filename)
@raises(VOTableSpecError)
def test_too_many_columns():
parse(get_pkg_data_filename('data/too_many_columns.xml.gz'))
def test_build_from_scratch(tmpdir):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
tree.Field(votable, ID="filename", datatype="char"),
tree.Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmpdir.join("new_votable.xml")))
votable = parse(str(tmpdir.join("new_votable.xml")))
table = votable.get_first_table()
assert_array_equal(
table.array.mask, np.array([(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]])],
dtype=[(str('filename'), str('?')),
(str('matrix'), str('?'), (2, 2))]))
def test_validate(test_path_object=False):
"""
    test_path_object is needed by ``test_validate_path_object`` below so
    that the file can also be passed as a ``pathlib.Path`` object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename('data/regression.xml')
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
with catch_warnings():
result = validate(fpath,
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/validation.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
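# Mock subprocess.Popen so that validate(..., xmllint=True) sees a successful
# xmllint run (returncode 0) without requiring xmllint to be installed.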
@mock.patch('subprocess.Popen')
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('ok', 'ko'),
'returncode': 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename('data/empty_table.xml'),
xmllint=True)
def test_validate_path_object():
"""
    Validate when the source is passed as a ``pathlib.Path`` object (#4412).
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmpdir):
votable = parse(get_pkg_data_filename('data/regression.xml'))
with open(str(tmpdir.join("regression.compressed.xml")), 'wb') as fd:
votable.to_xml(
fd,
compressed=True,
_astropy_version="testing")
with open(str(tmpdir.join("regression.compressed.xml")), 'rb') as fd:
votable = parse(fd)
def test_from_scratch_example():
with catch_warnings(VOWarning) as warning_lines:
try:
_run_test_from_scratch_example()
except ValueError as e:
warning_lines.append(str(e))
assert len(warning_lines) == 0
def _run_test_from_scratch_example():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == 'test1.xml'
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename('data/regression.xml')
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == 'win32':
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'))
assert isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'),
unit_format='generic')
assert not isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = 't2'
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
with catch_warnings():
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(get_pkg_data_filename('data/no_resource.xml'),
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/no_resource.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(get_pkg_data_filename('data/custom_datatype.xml'),
datatype_mapping={'bar': 'int'})
table = votable.get_first_table()
assert table.array.dtype['foo'] == np.int32
|
b199bee368a9f7d112105d689790c5743f2cdc7104180eb23d17a3bbf31b4d2f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import pytest
import numpy as np
from astropy.config import set_temp_config, reload_config
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_fileobj
from astropy.io.votable.table import parse, writeto
from astropy.io.votable import tree, conf
from astropy.io.votable.exceptions import VOWarning
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_table(tmpdir):
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/regression.xml'))
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
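    # Expected datatype/arraysize for each FIELD after converting the astropy
    # Table back to a VOTable; note that boolean columns are written out with
    # datatype 'bit'.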
field_types = [
('string_test', {'datatype': 'char', 'arraysize': '*'}),
('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
('unsignedByte', {'datatype': 'unsignedByte'}),
('short', {'datatype': 'short'}),
('int', {'datatype': 'int'}),
('long', {'datatype': 'long'}),
('double', {'datatype': 'double'}),
('float', {'datatype': 'float'}),
('array', {'datatype': 'long', 'arraysize': '2*'}),
('bit', {'datatype': 'bit'}),
('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
('floatComplex', {'datatype': 'floatComplex'}),
('doubleComplex', {'datatype': 'doubleComplex'}),
('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
('boolean', {'datatype': 'bit'}),
('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
('nulls', {'datatype': 'int'}),
('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
('precision1', {'datatype': 'double'}),
('precision2', {'datatype': 'double'}),
('doublearray', {'datatype': 'double', 'arraysize': '*'}),
('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]
for field, type in zip(t.fields, field_types):
name, d = type
assert field.ID == name
assert field.datatype == d['datatype']
if 'arraysize' in d:
assert field.arraysize == d['arraysize']
writeto(votable2, os.path.join(str(tmpdir), "through_table.xml"))
def test_read_through_table_interface(tmpdir):
from astropy.table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='main_table')
assert len(t) == 5
# Issue 8354
assert t['float'].format is None
fn = os.path.join(str(tmpdir), "table_interface.xml")
t.write(fn, table_id='FOO', format='votable')
with open(fn, 'rb') as fd:
t2 = Table.read(fd, format='votable', table_id='FOO')
assert len(t2) == 5
def test_read_through_table_interface2():
from astropy.table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='last_table')
assert len(t) == 0
def test_names_over_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
'Name', 'GLON', 'GLAT', 'RAdeg', 'DEdeg', 'Jmag', 'Hmag', 'Kmag',
'G3.6mag', 'G4.5mag', 'G5.8mag', 'G8.0mag', '4.5mag', '8.0mag',
'Emag', '24mag', 'f_Name']
def test_explicit_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9',
'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17']
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
from astropy.table import Table
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable')
assert len(t) == 1
def test_votable_path_object():
"""
    Test that a VOTable file can be passed as a ``pathlib.Path`` object (#4412).
"""
fpath = pathlib.Path(get_pkg_data_filename('data/names.xml'))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
from astropy.table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable')
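# tabledata_format selects the low-level serialization of the written VOTable:
# the output should contain a BINARY/BINARY2 stream and no TABLEDATA element.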
def test_write_with_format():
from astropy.table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY' in obuff
assert b'TABLEDATA' not in obuff
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY2' in obuff
assert b'TABLEDATA' not in obuff
def test_empty_table():
votable = parse(get_pkg_data_filename('data/empty_table.xml'))
table = votable.get_first_table()
astropy_table = table.to_table() # noqa
class TestVerifyOptions:
# Start off by checking the default (ignore)
def test_default(self):
with catch_warnings(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 0
# Then try the various explicit options
def test_verify_ignore(self):
with catch_warnings(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), verify='ignore')
assert len(w) == 0
def test_verify_warn(self):
with catch_warnings(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), verify='warn')
assert len(w) == 25
def test_verify_exception(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'), verify='exception')
# Make sure the pedantic option still works for now (pending deprecation)
def test_pedantic_false(self):
with catch_warnings(VOWarning, AstropyDeprecationWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), pedantic=False)
assert len(w) == 25
# Make sure we don't yet emit a deprecation warning
assert not any(isinstance(x.category, AstropyDeprecationWarning) for x in w)
def test_pedantic_true(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'), pedantic=True)
# Make sure that the default behavior can be set via configuration items
def test_conf_verify_ignore(self):
with conf.set_temp('verify', 'ignore'):
with catch_warnings(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 0
def test_conf_verify_warn(self):
with conf.set_temp('verify', 'warn'):
with catch_warnings(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 25
def test_conf_verify_exception(self):
with conf.set_temp('verify', 'exception'):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'))
# And make sure the old configuration item will keep working
def test_conf_pedantic_false(self, tmpdir):
with set_temp_config(tmpdir.strpath):
with open(tmpdir.join('astropy').join('astropy.cfg').strpath, 'w') as f:
f.write('[io.votable]\npedantic = False')
reload_config('astropy.io.votable')
with catch_warnings(VOWarning, AstropyDeprecationWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 25
# Make sure we don't yet emit a deprecation warning
assert not any(isinstance(x.category, AstropyDeprecationWarning) for x in w)
def test_conf_pedantic_true(self, tmpdir):
with set_temp_config(tmpdir.strpath):
with open(tmpdir.join('astropy').join('astropy.cfg').strpath, 'w') as f:
f.write('[io.votable]\npedantic = True')
reload_config('astropy.io.votable')
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'))
|
bb23efbecb9ee2a2948f084a8db98089036850862431a3f8204a849bba7eb68c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.io.votable import exceptions
from astropy.io.votable import tree
from astropy.tests.helper import raises
@raises(exceptions.W07)
def test_check_astroyear_fail():
config = {'verify': 'exception'}
field = tree.Field(None, name='astroyear')
tree.check_astroyear('X2100', field, config)
@raises(exceptions.W08)
def test_string_fail():
config = {'verify': 'exception'}
tree.check_string(42, 'foo', config)
def test_make_Fields():
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
table.fields.extend([tree.Field(votable, name='Test', datatype="float", unit="mag")])
|
7b3a6a4542b454c826201384b264525440aa102289ee7e1a0ff1a24d5d176cb1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
# THIRD-PARTY
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable import converters
from astropy.io.votable import exceptions
from astropy.io.votable import tree
from astropy.io.votable.table import parse_single_table
from astropy.tests.helper import raises, catch_warnings
from astropy.utils.data import get_pkg_data_filename
@raises(exceptions.E13)
def test_invalid_arraysize():
field = tree.Field(
None, name='broken', datatype='char', arraysize='foo')
converters.get_converter(field)
def test_oversize_char():
config = {'verify': 'exception'}
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert len(w) == 1
with catch_warnings(exceptions.W46) as w:
c.parse("XXX")
assert len(w) == 1
def test_char_mask():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
def test_oversize_unicode():
config = {'verify': 'exception'}
with catch_warnings(exceptions.W46) as w:
field = tree.Field(
None, name='c2', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
c.parse("XXX")
assert len(w) == 1
def test_unicode_mask():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
@raises(exceptions.E02)
def test_wrong_number_of_elements():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='int', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
@raises(ValueError)
def test_float_mask():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('') == (c.null, True)
c.parse('null')
def test_float_mask_permissive():
config = {'verify': 'ignore'}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('null') == (c.null, True)
@raises(exceptions.E02)
def test_complex_array_vararray():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
def test_complex_array_vararray2():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("")
assert len(x[0]) == 0
def test_complex_array_vararray3():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12")
assert len(x) == 2
assert np.all(x[0][0][0] == complex(1, 2))
def test_complex_vararray():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4")
assert len(x) == 2
assert x[0][0] == complex(1, 2)
@raises(exceptions.E03)
def test_complex():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='doubleComplex',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3")
@raises(exceptions.E04)
def test_bit():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("T")
def test_bit_mask():
config = {'verify': 'exception'}
with catch_warnings(exceptions.W39) as w:
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
c.output(True, True)
assert len(w) == 1
@raises(exceptions.E05)
def test_boolean():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='boolean',
config=config)
c = converters.get_converter(field, config=config)
c.parse('YES')
def test_boolean_array():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='boolean', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
r, mask = c.parse('TRUE FALSE T F 0 1')
assert_array_equal(r, [True, False, True, False, False, True])
@raises(exceptions.E06)
def test_invalid_type():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='foobar',
config=config)
c = converters.get_converter(field, config=config)
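# Judging from the asserted outputs below, precision='E4' formats to four
# significant digits while 'F4' keeps four digits after the decimal point.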
def test_precision():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='float', precision="E4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2'
field = tree.Field(
None, name='c', datatype='float', precision="F4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2480'
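# -2208988800 is below the 32-bit signed minimum (-2147483648), so parsing it
# as an 'int' field should raise W51.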
@raises(exceptions.W51)
def test_integer_overflow():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='int', config=config)
c = converters.get_converter(field, config=config)
c.parse('-2208988800', config=config)
def test_float_default_precision():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='float', arraysize="4",
config=config)
c = converters.get_converter(field, config=config)
assert (c.output([1, 2, 3, 8.9990234375], [False, False, False, False]) ==
'1 2 3 8.9990234375')
def test_vararray():
votable = tree.VOTableFile()
resource = tree.Resource()
votable.resources.append(resource)
table = tree.Table(votable)
resource.tables.append(table)
tabarr = []
heads = ['headA', 'headB', 'headC']
types = ["char", "double", "int"]
vals = [["A", 1.0, 2],
["B", 2.0, 3],
["C", 3.0, 4]]
for i in range(len(heads)):
tabarr.append(tree.Field(
votable, name=heads[i], datatype=types[i], arraysize="*"))
table.fields.extend(tabarr)
table.create_arrays(len(vals))
for i in range(len(vals)):
values = tuple(vals[i])
table.array[i] = values
buff = io.BytesIO()
votable.to_xml(buff)
def test_gemini_v1_2():
'''
    See Pull Request 4782 or Issue 4781 for details.
'''
table = parse_single_table(get_pkg_data_filename('data/gemini.xml'))
assert table is not None
|
5953a99960049831b886a3084bd7450e6e553bf82f46f5f2822ee0d150692b45 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
import pickle
import urllib.request
import urllib.error
import http.client
# VO
from astropy.io.votable import table
from astropy.io.votable import exceptions
from astropy.io.votable import xmlutil
class Result:
def __init__(self, url, root='results', timeout=10):
self.url = url
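        # Cache each URL's results under a directory derived from the MD5 of
        # the URL: <root>/<aa>/<bb>/<rest-of-hash>.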
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write('FAILED: {0}\n'.format(reason).encode('utf-8'))
self['network_error'] = reason
r = None
try:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail("HTTPException: {}".format(str(e)))
return
except (socket.timeout, socket.error) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
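        # Parse the downloaded VOTable with verify='warn', recording every
        # warning and exception it produces.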
with open(path, 'rb') as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, verify='warn', filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
# OSError is raised when XML file eats all memory and
# system sends kill signal.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
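    # Run STILTS votlint on the cached XML file; any output on stdout or a
    # non-zero return code is treated as a failure.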
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
"java -jar {} votlint validate=false {}".format(
path_to_stilts_jar, filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
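# Partition the validation results into overlapping subsets (all, correct,
# schema failures, votlint failures, per-warning and per-exception buckets)
# and return them as (key, title, results[, options]) tuples.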
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
'{}: {}'.format(warning_code, warning_descr),
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
'{}: {}'.format(exception_code, exception_descr),
exc, ['ul', 'li']))
return tables
|
b8f72ad1e31450498fccf3758a5713d97059118c0f45f28ab50c5d00733b081b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.bls import BoxLeastSquares
from astropy.timeseries.periodograms.lombscargle.core import has_units
def assert_allclose_blsresults(blsresult, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
This method loops over all attributes and compares the values using
:func:`~astropy.tests.helper.assert_quantity_allclose` function.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in blsresult.items():
if k not in other:
raise AssertionError("missing key '{0}'".format(k))
if k == "objective":
assert v == other[k], (
"Mismatched objectives. Expected '{0}', got '{1}'"
.format(v, other[k])
)
continue
assert_quantity_allclose(v, other[k], **kwargs)
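# Synthetic light curve: 500 random times over 10 days with a box transit
# (period 2 d, duration 0.16 d, depth 0.2) plus Gaussian noise.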
@pytest.fixture
def data():
rand = np.random.RandomState(123)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
dy = rand.uniform(0.005, 0.01, len(t))
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
m = np.abs((t-transit_time+0.5*period) % period-0.5*period) < 0.5*duration
y[m] = 1.0 - depth
y += dy * rand.randn(len(t))
return t, y, dy, dict(period=period, transit_time=transit_time,
duration=duration, depth=depth)
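# Regression test against hard-coded reference values; per its name it guards
# against a platform-dependent (32-bit) discrepancy in the results.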
def test_32bit_bug():
rand = np.random.RandomState(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
y[np.abs((t + 1.0) % 2.0-1) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.randn(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert np.allclose(results.period[np.argmax(results.power)],
1.9923406038842544)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert np.allclose(
results.power,
np.array([0.01421067, 0.02842475, 0.10867671, 0.05117755, 0.01783253])
)
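# The peak of the periodogram should recover the injected period, duration,
# transit_time and depth to within atol=0.01 for either objective.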
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 0.1,
np.log(params["period"]) + 0.1, 1000))
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind]-params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 1,
np.log(params["period"]) + 1, 10))
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
assert_allclose_blsresults(results, model.power(periods, durations,
method="slow",
objective=objective))
def test_input_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * u.one)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * u.one, dy * y_unit)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t*t_unit, y * u.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y*y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * u.hour)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
model.autoperiod(params["duration"] * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5,
maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5,
minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, u.day])
@pytest.mark.parametrize("y_unit", [None, u.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"]-1.0, params["period"]+1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(periods, params["duration"], method=method,
objective=objective)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == u.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == u.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == u.one
assert results.power.unit == u.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
dt = np.abs((t-params["transit_time"]+0.5*p) % p-0.5*p)
m_in = dt < 0.5*params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
w = np.linalg.solve(np.dot(A.T, A / dy[:, None]**2),
np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
model_true = model_true * u.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"],
params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"]-1, params["period"]+1,
period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = u.mag
t = t * u.day
y = y * u.mag
dy = dy * u.mag
params["period"] = params["period"] * u.day
params["duration"] = params["duration"] * u.day
params["transit_time"] = params["transit_time"] * u.day
params["depth"] = params["depth"] * u.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"],
oversample=1000)
stats = model.compute_stats(params["period"], params["duration"],
params["transit_time"])
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], np.array([9, 7, 7, 7, 8]))
assert_quantity_allclose(np.sum(stats["per_transit_log_likelihood"]),
results["log_likelihood"])
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(0.5*params["period"], params["duration"],
oversample=1000)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(stats["harmonic_amplitude"],
0.029945029964964204 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit)
return
assert_quantity_allclose(stats["harmonic_amplitude"],
0.033027988742275853 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-12407.505922833765)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip((1.0, 1.0, 1.0, 0.0),
("depth", "depth_even", "depth_odd", "depth_phased")):
assert np.abs((stats[k][0]-f*params["depth"]) / stats[k][1]) < 1.0
def test_negative_times(data):
t, y, dy, params = data
mu = np.mean(t)
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model1 = BoxLeastSquares(t, y, dy)
results1 = model1.autopower(duration)
# Compute the periodogram with offset (negative) times
model2 = BoxLeastSquares(t - mu, y, dy)
results2 = model2.autopower(duration)
# Shift the transit times back into the unshifted coordinates
results2.transit_time = (results2.transit_time + mu) % results2.period
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy, params = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same.
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of BoxLeastSquares, one with absolute and one
# with relative times.
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(trel, y, dy)
results1 = bls1.autopower(0.16 * u.day)
results2 = bls2.autopower(0.16 * u.day)
# All the results should match except transit time which should be
# absolute instead of relative in the first case.
for key in results1:
if key == 'transit_time':
assert_quantity_allclose((results1[key] - start).to(u.day), results2[key])
elif key == 'objective':
assert results1[key] == results2[key]
else:
assert_allclose(results1[key], results2[key])
# Check that model evaluation works fine
model1 = bls1.model(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
model2 = bls2.model(trel, 0.2 * u.day, 0.05 * u.day, TimeDelta(1 * u.day))
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
bls1.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t_model was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t_model was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check compute_stats
stats1 = bls1.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
stats2 = bls2.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
for key in stats1:
if key == 'transit_times':
assert_quantity_allclose((stats1[key] - start).to(u.day), stats2[key], atol=1e-10 * u.day)
elif key.startswith('depth'):
for value1, value2 in zip(stats1[key], stats2[key]):
assert_quantity_allclose(value1, value2)
else:
assert_allclose(stats1[key], stats2[key])
# Check compute_stats validation
with pytest.raises(TypeError) as exc:
bls1.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check transit_mask
mask1 = bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
mask2 = bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert_equal(mask1, mask2)
# Check transit_mask validation
with pytest.raises(TypeError) as exc:
bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
|
171d5fd0c088056fa24dfdd26284779e7f8e3ed9df994cebc252e761a9498b23 | import pytest
from astropy import physical_constants, astronomical_constants
import astropy.constants as const
def test_version_match():
pversion = physical_constants.get()
refpversion = const.h.__class__.__name__.lower()
assert pversion == refpversion
aversion = astronomical_constants.get()
refaversion = const.M_sun.__class__.__name__.lower()
assert aversion == refaversion
def test_previously_imported():
with pytest.raises(RuntimeError):
physical_constants.set('codata2018')
with pytest.raises(RuntimeError):
astronomical_constants.set('iau2015')
|
86866b8c96e44afb77daf8b12793aeb9460603e21943c20ba13e359a2130d270 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pytest
from astropy.constants import Constant
from astropy.units import Quantity as Q
def test_c():
from astropy.constants.codata2010 import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from astropy.constants.codata2010 import h
from astropy.constants import h as h_current
# check that the value is the CODATA2010 value
assert abs(h.value - 6.62606957e-34) < 1e-43
assert abs(h.si.value - 6.62606957e-34) < 1e-43
assert abs(h.cgs.value - 6.62606957e-27) < 1e-36
# Check it is different than the current value
assert abs(h.value - h_current.value) > 4e-42
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty
assert h.name
assert h.reference
assert h.unit
def test_e():
from astropy.constants.astropyconst13 import e
# A test quantity
E = Q(100.00000348276221, 'V/m')
# e.cgs is too ambiguous and should not work at all
with pytest.raises(TypeError):
e.cgs * E
assert isinstance(e.si, Q)
assert isinstance(e.gauss, Q)
assert isinstance(e.esu, Q)
assert e.si * E == Q(100, 'eV/m')
assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m')
assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m')
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from astropy.constants.astropyconst13 import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == 'acceleration'
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from astropy.constants.astropyconst13 import b_wien
from astropy import units as u
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
def test_unit():
from astropy import units as u
from astropy.constants import astropyconst13 as const
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
            # an invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from astropy import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from astropy.constants import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, 'reference')
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, 'reference')
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
def test_context_manager():
from astropy import constants as const
with const.set_enabled_constants('astropyconst13'):
assert const.h.value == 6.62606957e-34 # CODATA2010
assert const.h.value == 6.626070040e-34 # CODATA2014
with pytest.raises(ImportError):
with const.set_enabled_constants('notreal'):
const.h
|
4341f6d792bd85c68a703d1c774f08ad56477142f21f0689714c0ca28739a95e | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 5)
# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of Django requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
$ python -m pip install --upgrade pip setuptools
$ python -m pip install django
This will install the latest version of Django which works on your
version of Python. If you can't upgrade your pip (or Python), request
an older version of Django:
$ python -m pip install "django<2"
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='Django',
version=version,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
url='https://www.djangoproject.com/',
author='Django Software Foundation',
author_email='[email protected]',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
long_description=read('README.rst'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
install_requires=['pytz'],
extras_require={
"bcrypt": ["bcrypt"],
"argon2": ["argon2-cffi >= 16.1.0"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docs.djangoproject.com/',
'Funding': 'https://www.djangoproject.com/fundraising/',
'Source': 'https://github.com/django/django',
'Tracker': 'https://code.djangoproject.com/',
},
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
|
c15b26a6fa245191f8e8738316cf3a160d3a06d31a8d566dd7c321dca923b713 | #!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
# output how many strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command supports the --languages and --resources options to limit its
# operation to the specified languages or resources. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from argparse import ArgumentParser
from subprocess import PIPE, Popen, call
import django
from django.conf import settings
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
    If the resources list is not None, return only the directories whose names appear in it.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
# Collect all locale directories
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
# Filter by resources, if any
if resources is not None:
res_names = [d[0] for d in dirs]
dirs = [ld for ld in dirs if ld[0] in resources]
if len(resources) > len(dirs):
print("You have specified some unknown resources. "
"Available resource names are: %s" % (', '.join(res_names),))
exit(1)
return dirs
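# A hedged usage sketch (the checkout path is an illustrative assumption):
#
#     _get_locale_dirs(['admin'])
#     # -> [('admin', '<checkout>/django/contrib/admin/locale')]
#
# The JS catalog must be requested explicitly as 'admin-js', and 'core' is
# dropped here because it is not listed in the resources filter.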
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django.core"
else:
return "django.contrib-%s" % name
def _check_diff(cat_name, base_path):
"""
Output the approximate number of changed/added strings in the en catalog.
"""
po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
p = Popen("git diff -U0 %s | egrep '^[-+]msgid' | wc -l" % po_path,
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
num_changes = int(output.strip())
print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
"""
Update the en/LC_MESSAGES/django.po (main and contrib) files with
new/updated translatable strings.
"""
settings.configure()
django.setup()
if resources is not None:
print("`update_catalogs` will always process all resources.")
contrib_dirs = _get_locale_dirs(None, include_core=False)
os.chdir(os.path.join(os.getcwd(), 'django'))
print("Updating en catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'])
print("Updating en JS catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'], domain='djangojs')
# Output changed stats
_check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
for name, dir_ in contrib_dirs:
_check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
"""
Output language statistics of committed translation files for each
Django catalog.
    If resources is provided, it should be a list of translation resources to
    limit the output (e.g. ['core', 'gis']).
"""
locale_dirs = _get_locale_dirs(resources)
for name, dir_ in locale_dirs:
print("\nShowing translations stats for '%s':" % name)
langs = sorted(d for d in os.listdir(dir_) if not d.startswith('_'))
for lang in langs:
if languages and lang not in languages:
continue
# TODO: merge first with the latest en catalog
p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if p.returncode == 0:
                # msgfmt outputs the stats on stderr
print("%s: %s" % (lang, errors.strip()))
else:
print("Errors happened when checking %s translation for %s:\n%s" % (
lang, name, errors))
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex, wrap long lines, generate mo files.
"""
locale_dirs = _get_locale_dirs(resources)
errors = []
for name, dir_ in locale_dirs:
# Transifex pull
if languages is None:
call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
target_langs = sorted(d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en')
else:
for lang in languages:
call('tx pull -r %(res)s -f -l %(lang)s' % {
'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
target_langs = languages
# msgcat to wrap lines and msgfmt for compilation of .mo file
for lang in target_langs:
po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
if not os.path.exists(po_path):
print("No %(lang)s translation for resource %(name)s" % {
'lang': lang, 'name': name})
continue
call('msgcat --no-location -o %s %s' % (po_path, po_path), shell=True)
res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
if res != 0:
errors.append((name, lang))
if errors:
print("\nWARNING: Errors have occurred in following cases:")
for resource, lang in errors:
print("\tResource %s for language %s" % (resource, lang))
exit(1)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = ArgumentParser()
parser.add_argument('cmd', nargs=1, choices=RUNABLE_SCRIPTS)
parser.add_argument("-r", "--resources", action='append', help="limit operation to the specified resources")
parser.add_argument("-l", "--languages", action='append', help="limit operation to the specified languages")
options = parser.parse_args()
eval(options.cmd[0])(options.resources, options.languages)
|
300f1f4d375bb33e7c0d957bfda0fb6ad78d8bd1ff758c286f11cc01ebf27fde | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.functional import Promise
def render_to_response(template_name, context=None, content_type=None, status=None, using=None):
"""
    Return an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
warnings.warn(
'render_to_response() is deprecated in favor of render(). It has the '
'same signature except that it also requires a request.',
RemovedInDjango30Warning, stacklevel=2,
)
content = loader.render_to_string(template_name, context, using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None, content_type=None, status=None, using=None):
"""
    Return an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
content = loader.render_to_string(template_name, context, request, using=using)
return HttpResponse(content, content_type, status)
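# A minimal usage sketch for render(); the view, model, template name and
# context below are illustrative assumptions, not part of this module:
#
#     def article_list(request):
#         articles = Article.objects.all()
#         return render(request, 'news/article_list.html', {'articles': articles})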
def redirect(to, *args, permanent=False, **kwargs):
"""
Return an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
Issues a temporary redirect by default; pass permanent=True to issue a
permanent redirect.
"""
redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
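# Usage sketches for the three accepted argument styles; ``article`` and the
# 'article-detail' URL name are illustrative assumptions:
#
#     return redirect(article)                          # model with get_absolute_url()
#     return redirect('article-detail', pk=article.pk)  # reversible view name
#     return redirect('/articles/', permanent=True)     # plain URL, permanent redirect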
def _get_queryset(klass):
"""
Return a QuerySet or a Manager.
Duck typing in action: any class with a `get()` method (for
get_object_or_404) or a `filter()` method (for get_list_or_404) might do
the job.
"""
# If it is a model class or anything else with ._default_manager
if hasattr(klass, '_default_manager'):
return klass._default_manager.all()
return klass
def get_object_or_404(klass, *args, **kwargs):
"""
Use get() to return an object, or raise a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Like with QuerySet.get(), MultipleObjectsReturned is raised if more than
one object is found.
"""
queryset = _get_queryset(klass)
if not hasattr(queryset, 'get'):
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_object_or_404() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
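# A usage sketch (``Article`` is an illustrative model); this is roughly
# equivalent to the hand-written try/except shown below it:
#
#     article = get_object_or_404(Article, pk=1)
#
#     try:
#         article = Article.objects.get(pk=1)
#     except Article.DoesNotExist:
#         raise Http404('No Article matches the given query.')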
def get_list_or_404(klass, *args, **kwargs):
"""
Use filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
if not hasattr(queryset, 'filter'):
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_list_or_404() must be a Model, Manager, or "
"QuerySet, not '%s'." % klass__name
)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = str(to)
if isinstance(to, str):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return reverse(to, args=args, kwargs=kwargs)
except NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
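# Resolution sketches covering each branch above; the model instance and URL
# name are illustrative assumptions:
#
#     resolve_url(article)                 # -> article.get_absolute_url()
#     resolve_url('article-detail', pk=4)  # -> reverse('article-detail', kwargs={'pk': 4})
#     resolve_url('https://example.com/')  # -> returned as-is (looks like a URL)
#     resolve_url('./relative/')           # -> returned as-is (relative URL)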
|
63785a3bb1f46523a1c104f76ab7fc4d68f8accc630c67482f5e2cc0b471a3ef | from django.utils.version import get_version
VERSION = (2, 2, 0, 'alpha', 0)
__version__ = get_version(VERSION)
def setup(set_prefix=True):
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True.
"""
from django.apps import apps
from django.conf import settings
from django.urls import set_script_prefix
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
if set_prefix:
set_script_prefix(
'/' if settings.FORCE_SCRIPT_NAME is None else settings.FORCE_SCRIPT_NAME
)
apps.populate(settings.INSTALLED_APPS)
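# A hedged sketch of standalone usage outside a WSGI/management context; the
# settings module name is an illustrative assumption:
#
#     import os
#     import django
#
#     os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
#     django.setup()  # configures logging and populates the app registry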
|
f5ae53a35bd05ea7f6260fde87c9cbbc87341979838c45efe2313541912a0459 | """
Invokes django-admin when the django module is run as a script.
Example: python -m django check
"""
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
ced3ddc90ae2bccd27e2e649550803646598cb1f46fabe22365bd7cc41a75e25 | #!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import get_runner
from django.utils.deprecation import (
RemovedInDjango30Warning, RemovedInDjango31Warning,
)
from django.utils.log import DEFAULT_LOGGING
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango30Warning)
warnings.simplefilter('error', RemovedInDjango31Warning)
# Make runtime warnings errors to ensure no usage of error-prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Removing the temporary TMPDIR.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_runner_apps',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' not in f and
os.path.basename(f) not in SUBDIRS_TO_SKIP and
not os.path.isfile(f) and
os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel):
# Reduce the given test labels to just the app module path.
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
# Load all the test model apps.
test_modules = get_test_modules()
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
module_found_in_labels = not test_labels or any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set
)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run on Python 3.6+ (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags):
state = setup(verbosity, test_labels, parallel)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
def bisect_tests(bisection_label, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true', dest='failfast',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', dest='selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', dest='parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
options = parser.parse_args()
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(options.bisect, options, options.modules, options.parallel)
elif options.pair:
paired_tests(options.pair, options, options.modules, options.parallel)
else:
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
)
if failures:
sys.exit(1)
|
004a3a216bfe0ea8189282f7c7ecbdc2602a8dc9f4464a7ac854d55ea29fdf72 | """This URLconf exists because Django expects ROOT_URLCONF to exist. URLs
should be added within the test folders, and use TestCase.urls to set them.
This helps the tests remain isolated.
"""
urlpatterns = []
|
1304896c1d81194a1ddd9c8d119a4b41cb11909c91b4496464856cc3a203ba24 | # This is an example test settings file for use with the Django test suite.
#
# The 'sqlite3' backend requires only the ENGINE setting (an in-
# memory database will be used). All other backends will require a
# NAME and potentially authentication information. See the
# following section in the docs for more information:
#
# https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/unit-tests/
#
# The different databases that Django supports behave differently in certain
# situations, so it is recommended to run the test suite against as many
# database backends as possible. You may want to create a separate settings
# file for each of the backends you test against.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
'other': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
SECRET_KEY = "django_tests_secret_key"
# Use a fast hasher to speed up tests.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
|
b81cfc18c6e633527f7a258353f301d3fd5b33b19c70145cd2d5f8c9a02d12aa | # Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import abspath, dirname, join
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/d/topic/sphinx-dev/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ticket_role",
"cve_role",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Workaround for https://bitbucket.org/dhellmann/sphinxcontrib-spelling/issues/13
html_use_smartypants = False
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.2'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep440ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
return pep440ver + '.dev'
return pep440ver
release = django_release()
# The "development version" of Django
django_next_version = '2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_theme']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://www.sphinx-doc.org/en/master/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ['app.add_directive']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': (
'\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}'
'\\DeclareUnicodeCharacter{2665}{[unicode-heart]}'
'\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}'
),
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'ref/django-admin',
'django-admin',
'Utility script for the Django Web framework',
['Django Software Foundation'],
1
)]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- custom extension options --------------------------------------------------
cve_url = 'https://nvd.nist.gov/view/vuln/detail?vulnId=%s'
ticket_url = 'https://code.djangoproject.com/ticket/%s'
|
a8fdb4df33708daa255059d72cd6479ee067ec7373cb0f7f26438310244a65b7 | """Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) https://pypi.org/project/PyDispatcher/2.0.1/
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from django.dispatch.dispatcher import Signal, receiver # NOQA
|
efb1c122d49f1933f334e9db84ce2c30d12049ac25f72335196c78f1226baf46 | import threading
import weakref
from django.utils.inspect import func_accepts_kwargs
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal:
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
                If weak is True, then receiver must be weakly referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = weakref.WeakMethod
receiver_object = receiver.__self__
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
if not any(r_key == lookup_key for r_key, _ in self.receivers):
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that not all receivers
will be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Return a list of tuple pairs [(receiver, response), ... ].
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
return [
(receiver, receiver(signal=self, sender=sender, **named))
for receiver in self._live_receivers(sender)
]
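# Illustrative usage (a minimal sketch, not part of this module; the signal
# and receiver names below are hypothetical):
#
#     pizza_done = Signal()
#
#     def callback(sender, **kwargs):
#         return 'ack'
#
#     pizza_done.connect(callback)
#     pizza_done.send(sender=None)  # -> [(callback, 'ack')]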
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ].
If any receiver raises an error (specifically any subclass of
Exception), return the error instance as the result for that receiver.
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
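# Illustrative sketch (hypothetical receiver, continuing the example above):
# unlike send(), send_robust() returns a raised exception in place of the
# response instead of propagating it.
#
#     def failing_receiver(sender, **kwargs):
#         raise ValueError('boom')
#
#     pizza_done.connect(failing_receiver)
#     pizza_done.send_robust(sender=None)
#     # -> [..., (failing_receiver, ValueError('boom'))]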
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
self.receivers = [
r for r in self.receivers
if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None)
]
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references, resolves them, and returns only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
|
0c54d4bde05865f38328753f5cdf94269be6e155de83efeec53107e12b02ddf6 | import sys
import unittest
from contextlib import contextmanager
from django.test import LiveServerTestCase, tag
from django.utils.module_loading import import_string
from django.utils.text import capfirst
class SeleniumTestCaseBase(type(LiveServerTestCase)):
# List of browsers to dynamically create test classes for.
browsers = []
# Sentinel value to differentiate browser-specific instances.
browser = None
def __new__(cls, name, bases, attrs):
"""
Dynamically create new classes and add them to the test module when
multiple browsers specs are provided (e.g. --selenium=firefox,chrome).
"""
test_class = super().__new__(cls, name, bases, attrs)
# If the test class is either browser-specific or a test base, return it.
if test_class.browser or not any(name.startswith('test') and callable(value) for name, value in attrs.items()):
return test_class
elif test_class.browsers:
# Reuse the created test class to make it browser-specific.
# We can't rename it to include the browser name or create a
# subclass like we do with the remaining browsers as it would
# either duplicate tests or prevent pickling of its instances.
first_browser = test_class.browsers[0]
test_class.browser = first_browser
# Create subclasses for each of the remaining browsers and expose
# them through the test's module namespace.
module = sys.modules[test_class.__module__]
for browser in test_class.browsers[1:]:
browser_test_class = cls.__new__(
cls,
"%s%s" % (capfirst(browser), name),
(test_class,),
{'browser': browser, '__module__': test_class.__module__}
)
setattr(module, browser_test_class.__name__, browser_test_class)
return test_class
# If no browsers were specified, skip this class (it'll still be discovered).
return unittest.skip('No browsers specified.')(test_class)
@classmethod
def import_webdriver(cls, browser):
return import_string("selenium.webdriver.%s.webdriver.WebDriver" % browser)
def create_webdriver(self):
return self.import_webdriver(self.browser)()
@tag('selenium')
class SeleniumTestCase(LiveServerTestCase, metaclass=SeleniumTestCaseBase):
implicit_wait = 10
@classmethod
def setUpClass(cls):
cls.selenium = cls.create_webdriver()
cls.selenium.implicitly_wait(cls.implicit_wait)
super().setUpClass()
@classmethod
def _tearDownClassInternal(cls):
# quit() the WebDriver before attempting to terminate and join the
# single-threaded LiveServerThread to avoid a deadlock if the browser
# kept a connection alive.
if hasattr(cls, 'selenium'):
cls.selenium.quit()
super()._tearDownClassInternal()
@contextmanager
def disable_implicit_wait(self):
"""Disable the default implicit wait."""
self.selenium.implicitly_wait(0)
try:
yield
finally:
self.selenium.implicitly_wait(self.implicit_wait)
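# Illustrative sketch (hypothetical test case, not part of this module): a
# subclass gets a `selenium` WebDriver plus the live server URL from
# LiveServerTestCase; the browsers come from SeleniumTestCaseBase.browsers,
# normally filled in by the test runner (e.g. --selenium=firefox,chrome).
#
#     class ExampleSeleniumTests(SeleniumTestCase):
#         browsers = ['firefox']
#
#         def test_homepage_title(self):
#             self.selenium.get(self.live_server_url + '/')
#             self.assertIn('Example', self.selenium.title)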
|
e45ae6be006bc73668e5454b5bd1d9d621c130c85c9d770d2f9636c676aa54f0 | """Django Unit Test framework."""
from django.test.client import Client, RequestFactory
from django.test.testcases import (
LiveServerTestCase, SimpleTestCase, TestCase, TransactionTestCase,
skipIfDBFeature, skipUnlessAnyDBFeature, skipUnlessDBFeature,
)
from django.test.utils import (
ignore_warnings, modify_settings, override_settings,
override_system_checks, tag,
)
__all__ = [
'Client', 'RequestFactory', 'TestCase', 'TransactionTestCase',
'SimpleTestCase', 'LiveServerTestCase', 'skipIfDBFeature',
'skipUnlessAnyDBFeature', 'skipUnlessDBFeature', 'ignore_warnings',
'modify_settings', 'override_settings', 'override_system_checks', 'tag',
]
|
58ca601dd653e59fb856cc8b9840364a84cb1eb974a8b428b579b17cdaccd447 | import ctypes
import itertools
import logging
import multiprocessing
import os
import pickle
import textwrap
import unittest
from importlib import import_module
from io import StringIO
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import (
setup_databases as _setup_databases, setup_test_environment,
teardown_databases as _teardown_databases, teardown_test_environment,
)
from django.utils.datastructures import OrderedSet
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def addSubTest(self, test, subtest, err):
super().addSubTest(test, subtest, err)
if err is not None:
self.debug_sql_stream.seek(0)
errors = self.failures if issubclass(err[0], test.failureException) else self.errors
errors[-1] = errors[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % sql_debug)
class RemoteTestResult:
"""
Record information about which tests have succeeded and which have failed.
The sole purpose of this class is to record events in the child processes
so they can be replayed in the master process. As a consequence it doesn't
inherit unittest.TestResult and doesn't attempt to implement all its API.
The implementation matches the unpythonic coding style of unittest2.
"""
def __init__(self):
if tblib is not None:
tblib.pickling_support.install()
self.events = []
self.failfast = False
self.shouldStop = False
self.testsRun = 0
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print("""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(test, subtest, pickle_exc))
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
if tblib is None:
print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
pip install tblib
""".format(test, original_exc_txt))
else:
print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def stop_if_failfast(self):
if self.failfast:
self.stop()
def stop(self):
self.shouldStop = True
def startTestRun(self):
self.events.append(('startTestRun',))
def stopTestRun(self):
self.events.append(('stopTestRun',))
def startTest(self, test):
self.testsRun += 1
self.events.append(('startTest', self.test_index))
def stopTest(self, test):
self.events.append(('stopTest', self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(('addError', self.test_index, err))
self.stop_if_failfast()
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(('addFailure', self.test_index, err))
self.stop_if_failfast()
def addSubTest(self, test, subtest, err):
# Follow Python 3.5's implementation of unittest.TestResult.addSubTest()
# by not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(('addSubTest', self.test_index, subtest, err))
self.stop_if_failfast()
def addSuccess(self, test):
self.events.append(('addSuccess', self.test_index))
def addSkip(self, test, reason):
self.events.append(('addSkip', self.test_index, reason))
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(('addExpectedFailure', self.test_index, err))
def addUnexpectedSuccess(self, test):
self.events.append(('addUnexpectedSuccess', self.test_index))
self.stop_if_failfast()
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None):
self.failfast = failfast
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
test(result)
return result
def default_test_processes():
"""Default number of test processes when using the --parallel option."""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork().
if multiprocessing.get_start_method() != 'fork':
return 1
try:
return int(os.environ['DJANGO_TEST_PROCESSES'])
except KeyError:
return multiprocessing.cpu_count()
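# Illustrative usage (shell, not Python; hypothetical values): the worker
# count can be forced through the environment or the --parallel option, e.g.
#
#     DJANGO_TEST_PROCESSES=4 python manage.py test --parallel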
_worker_id = 0
def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id))
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast = args
runner = runner_class(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, suite, processes, failfast=False):
self.subsuites = partition_suite_by_case(suite)
self.processes = processes
self.failfast = failfast
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter],
)
args = [
(self.runner_class, index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
class DiscoverRunner:
"""A Django test runner that uses unittest2 test discovery."""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_mode=False, debug_sql=False, parallel=0,
tags=None, exclude_tags=None, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
'-t', '--top-level-directory', action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.',
)
parser.add_argument(
'-p', '--pattern', action='store', dest='pattern', default="test*.py",
help='The test matching pattern. Defaults to test*.py.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb',
help='Preserves the test DB between runs.'
)
parser.add_argument(
'-r', '--reverse', action='store_true', dest='reverse',
help='Reverses test cases order.',
)
parser.add_argument(
'--debug-mode', action='store_true', dest='debug_mode',
help='Sets settings.DEBUG to True.',
)
parser.add_argument(
'-d', '--debug-sql', action='store_true', dest='debug_sql',
help='Prints logged SQL queries on failure.',
)
parser.add_argument(
'--parallel', dest='parallel', nargs='?', default=1, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', action='append', dest='tags',
help='Run only tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', action='append', dest='exclude_tags',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
if self.tags or self.exclude_tags:
if self.verbosity >= 2:
if self.tags:
print('Including test tag(s): %s.' % ', '.join(sorted(self.tags)))
if self.exclude_tags:
print('Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags)))
suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags)
suite = reorder_suite(suite, self.reorder_by, self.reverse)
if self.parallel > 1:
parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
parallel_units = len(parallel_suite.subsuites)
self.parallel = min(self.parallel, parallel_units)
# If there's only one TestCase, parallelization isn't needed.
if self.parallel > 1:
suite = parallel_suite
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
self.parallel, **kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def get_test_runner_kwargs(self):
return {
'failfast': self.failfast,
'resultclass': self.get_resultclass(),
'verbosity': self.verbosity,
}
def run_checks(self):
# Checks are run after database creation since some checks require
# database access.
call_command('check', verbosity=self.verbosity)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Return the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
self.run_checks()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def reorder_suite(suite, classes, reverse=False):
"""
Reorder a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, sort tests within classes in opposite order but
don't reverse test classes.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partition a test suite by test type. Also prevent duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
filtered_suite = suite_class()
for test in suite:
if isinstance(test, suite_class):
filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
else:
test_tags = set(getattr(test, 'tags', set()))
test_fn_name = getattr(test, '_testMethodName', str(test))
test_fn = getattr(test, test_fn_name, test)
test_fn_tags = set(getattr(test_fn, 'tags', set()))
all_tags = test_tags.union(test_fn_tags)
matched_tags = all_tags.intersection(tags)
if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
filtered_suite.addTest(test)
return filtered_suite
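# Illustrative sketch (hypothetical tests, not part of this module): tags are
# attached with the @tag decorator and then selected with --tag/--exclude-tag,
# which funnel into filter_tests_by_tags() above.
#
#     from django.test import TestCase, tag
#
#     @tag('slow')
#     class ReportTests(TestCase):
#         @tag('integration')
#         def test_generate(self):
#             ...
#
#     # python manage.py test --tag=slow --exclude-tag=integration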
|
fa284d512c8c0527d9c18375cd1b9a50b6021eef8a2f5be8d18a9f0b5f59b741 | import json
import mimetypes
import os
import re
import sys
from copy import copy
from functools import partial
from http import HTTPStatus
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# JSON Vendor Tree spec: https://tools.ietf.org/html/rfc6838#section-3.2
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(vnd\..+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
class ClientHandler(BaseHandler):
"""
An HTTP handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
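# Illustrative usage (a minimal sketch; assumes settings are configured so
# DEFAULT_CHARSET is available):
#
#     encode_multipart(BOUNDARY, {'name': 'value'})
#     # -> b'--BoUnDaRyStRiNg\r\n'
#     #    b'Content-Disposition: form-data; name="name"\r\n\r\nvalue\r\n'
#     #    b'--BoUnDaRyStRiNg--\r\n'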
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
filename = filename or key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults):
self.json_encoder = json_encoder
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
return {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
**self.defaults,
**request,
}
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return data.encode(charset)
def _encode_json(self, data, content_type):
"""
Return encoded JSON if data is a dict and content_type is
application/json.
"""
should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(data, dict)
return json.dumps(data, cls=self.json_encoder) if should_encode else data
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode('iso-8859-1')
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
return self.generic('GET', path, secure=secure, **{
'QUERY_STRING': urlencode(data, doseq=True),
**extra,
})
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"""Construct a POST request."""
data = self._encode_json({} if data is None else data, content_type)
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
return self.generic('HEAD', path, secure=secure, **{
'QUERY_STRING': urlencode(data, doseq=True),
**extra,
})
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PUT request."""
data = self._encode_json(data, content_type)
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PATCH request."""
data = self._encode_json(data, content_type)
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a DELETE request."""
data = self._encode_json(data, content_type)
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': method,
'SERVER_PORT': '443' if secure else '80',
'wsgi.url_scheme': 'https' if secure else 'http',
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': content_type,
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = parsed[4].encode().decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
def request(self, **request):
"""
The master request method. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
raise exc_value
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""Request a response from the server using POST."""
response = super().post(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Request a response from the server using OPTIONS."""
response = super().options(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PUT."""
response = super().put(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PATCH."""
response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a DELETE request to the server."""
response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, content_type=content_type, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def login(self, **credentials):
"""
Set the Client to appear as if it has successfully logged into a site.
Return True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, 'get_user'):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, '_json'):
if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
response._json = json.loads(response.content.decode(), **extra)
return response._json
def _handle_redirects(self, response, data='', content_type='', **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
redirect_status_codes = (
HTTPStatus.MOVED_PERMANENTLY,
HTTPStatus.FOUND,
HTTPStatus.SEE_OTHER,
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
)
while response.status_code in redirect_status_codes:
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT):
# Preserve request method post-redirect for 307/308 responses.
request_method = getattr(self, response.request['REQUEST_METHOD'].lower())
else:
request_method = self.get
data = QueryDict(url.query)
content_type = None
response = request_method(path, data=data, content_type=content_type, follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
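# Illustrative usage (hypothetical URLs): passing follow=True makes the client
# chase redirects and record each hop on response.redirect_chain.
#
#     client = Client()
#     response = client.get('/old-path/', follow=True)
#     # response.redirect_chain might look like [('/new-path/', 302)]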
|
6b54ebc01369ef566c0d2bf3ce4413da26cae844c59900e2a0aa0fb2193570a7 | import difflib
import json
import posixpath
import sys
import threading
import unittest
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse,
)
from urllib.request import url2pathname
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils.decorators import classproperty
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Put value into a list if it's not already one. Return an empty list if
value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
'%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1)
)
)
)
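# Illustrative usage (hypothetical model): this context backs the
# assertNumQueries() assertion, typically used as a context manager inside a
# TestCase/TransactionTestCase method.
#
#     with self.assertNumQueries(1):
#         list(Book.objects.all())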
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if self.rendered_templates:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names)
)
else:
message += ' No template was rendered.'
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _CursorFailure:
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
connection.chunked_cursor = _CursorFailure(cls.__name__, connection.chunked_cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
connection.chunked_cursor = connection.chunked_cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super().tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
A context manager that temporarily applies changes to a list setting and
reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, msg_prefix='',
fetch_redirect_response=True):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
self.assertURLEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
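# Illustrative usage sketch (not part of this module): a test asserting a
# login redirect; the URLs and the view are hypothetical.
#
#     def test_profile_requires_login(self):
#         response = self.client.get('/accounts/profile/')
#         self.assertRedirects(
#             response, '/accounts/login/?next=/accounts/profile/',
#             status_code=302, target_status_code=200,
#         )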
def assertURLEqual(self, url1, url2, msg_prefix=''):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment))
self.assertEqual(
normalize(url1), normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2)
)
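# Illustrative sketch of the comparison rules documented above: query-string
# order is ignored, but repeated parameters keep their relative order
# (the URLs are hypothetical).
#
#     self.assertURLEqual('/path/?x=1&y=2', '/path/?y=2&x=1')  # passes
#     self.assertURLEqual('/path/?a=1&a=2', '/path/?a=2&a=1')  # fails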
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
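# Illustrative sketch (hypothetical view and strings): plain substring
# counting versus HTML-aware matching.
#
#     response = self.client.get('/articles/')
#     self.assertContains(response, 'Latest articles', count=1)
#     self.assertContains(response, '<h1>Latest articles</h1>', html=True)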
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Assert that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors or 'none')
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
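# Illustrative sketch (hypothetical form/field names and messages): a field
# error, and a non-field error by passing field=None.
#
#     response = self.client.post('/signup/', {'email': 'not-an-email'})
#     self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
#     self.assertFormError(response, 'form', None, 'Passwords do not match.')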
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Assert that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
not non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
not non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
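# Illustrative sketch (hypothetical names and messages): field, non-field,
# and non-form errors on a formset exposed as 'formset' in the context.
#
#     self.assertFormsetError(response, 'formset', 0, 'name', 'This field is required.')
#     self.assertFormsetError(response, 'formset', 0, None, 'Dates overlap.')
#     self.assertFormsetError(response, 'formset', None, None, 'Too many forms.')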
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# Use the assertion as a context manager with this template name.
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
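# Illustrative sketch: the two calling conventions supported above, with a
# hypothetical URL and template name.
#
#     response = self.client.get('/articles/')
#     self.assertTemplateUsed(response, 'articles/index.html', count=1)
#
#     with self.assertTemplateUsed('articles/index.html'):
#         self.client.get('/articles/')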
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs):
callable_obj = None
if args:
callable_obj = args[0]
args = args[1:]
cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises, 'exception', expected_exception, expected_message,
*args, **kwargs
)
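# Illustrative sketch: usable as a context manager or by passing a callable
# plus its arguments.
#
#     with self.assertRaisesMessage(ValueError, 'invalid literal'):
#         int('abc')
#
#     self.assertRaisesMessage(ValueError, 'invalid literal', int, 'abc')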
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns, 'warning', expected_warning, expected_message,
*args, **kwargs
)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, 'required': False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages['required']]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
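# Illustrative sketch (assuming django.forms.EmailField and its default error
# message): map valid inputs to cleaned values and invalid inputs to the
# messages they should raise.
#
#     self.assertFieldOutput(
#         EmailField,
#         valid={'a@a.com': 'a@a.com'},
#         invalid={'aaa': ['Enter a valid email address.']},
#     )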
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
str(dom1).splitlines(), str(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
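# Illustrative sketch: attribute order and insignificant whitespace are
# ignored, so this assertion passes.
#
#     self.assertHTMLEqual(
#         '<p class="a" id="x">Hi</p>',
#         '<p id="x" class="a">  Hi </p>',
#     )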
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
Usual JSON non-significant whitespace rules apply since the heavy lifting
is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
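# Illustrative sketch: key order and whitespace in the raw JSON string are
# irrelevant; the expected value may be given as a Python object.
#
#     self.assertJSONEqual('{"b": 2, "a": 1}', {'a': 1, 'b': 2})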
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
Usual JSON non-significant whitespace rules apply since the heavy lifting
is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
# Do the tests in this class query non-default databases?
multi_db = False
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup, then flush the database and reload
# the fixture during teardown (as flush does not restore data from
# migrations). This can be slow; this flag allows enabling it on a
# per-case basis.
serialized_rollback = False
# Since tests will be wrapped in a transaction, or serialized if they
# are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# optionally including mirrors. Otherwise, act only on the default database.
if cls.multi_db:
return [
alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax because we're using
# *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test cleanup:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
# For example, qs.iterator() could be passed as qs, but it does not
# have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered value")
return self.assertEqual(list(items), values, msg=msg)
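# Illustrative sketch (hypothetical Poll model): compare against expected
# reprs, or against plain values with a custom transform.
#
#     self.assertQuerysetEqual(
#         Poll.objects.order_by('name'), ['<Poll: A>', '<Poll: B>'],
#     )
#     self.assertQuerysetEqual(
#         Poll.objects.all(), ['A', 'B'],
#         transform=lambda p: p.name, ordered=False,
#     )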
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
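# Illustrative sketch (hypothetical Poll model): usable as a context manager
# or by passing a callable.
#
#     with self.assertNumQueries(1):
#         Poll.objects.count()
#
#     self.assertNumQueries(1, Poll.objects.count)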
def connections_support_transactions():
"""Return True if all connections support transactions."""
return all(conn.features.supports_transactions for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
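# Illustrative sketch (hypothetical Author model): data created here once per
# class is rolled back with the class-level atomic blocks above, so every
# test method sees the same initial state.
#
#     class AuthorTests(TestCase):
#         @classmethod
#         def setUpTestData(cls):
#             cls.author = Author.objects.create(name='Ursula')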
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get('__unittest_skip__')
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
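# Illustrative sketch: the feature names are attributes of
# connection.features, e.g.:
#
#     @skipUnlessDBFeature('supports_transactions')
#     def test_rollback(self):
#         ...
#
#     @skipIfDBFeature('interprets_empty_strings_as_nulls')
#     def test_empty_string_roundtrip(self):
#         ...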
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live http server while the tests are running."""
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self):
return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
framework, such as Selenium, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
threads don't share the same transactions (unless using in-memory sqlite)
and each thread needs to commit all its transactions so that the other
thread can see the changes.
"""
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super().setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
# reason has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
cls._live_server_modified_settings.disable()
super().tearDownClass()
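# Illustrative sketch (assuming the third-party selenium package): drive the
# live server from an external browser client.
#
#     class FrontendTests(LiveServerTestCase):
#         def test_homepage_title(self):
#             from selenium import webdriver
#             driver = webdriver.Firefox()
#             try:
#                 driver.get(self.live_server_url + '/')
#                 assert 'Welcome' in driver.title
#             finally:
#                 driver.quit()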
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._lockfile.close()
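# Illustrative sketch: test cases sharing a resource point `lockfile` at the
# same existing file (commonly the module's own __file__) so their class
# setup/teardown never runs concurrently.
#
#     class BaseFilesystemTests(SerializeMixin, TestCase):
#         lockfile = __file__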
|
371f2f142bd232bfa79547fd0a894c299241c394e67da8e42dab72e1d8ce6c56 | """Compare two HTML documents."""
import re
from html.parser import HTMLParser
WHITESPACE = re.compile(r'\s+')
def normalize_whitespace(string):
return WHITESPACE.sub(' ', string)
class Element:
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, str):
element = normalize_whitespace(element)
if self.children:
if isinstance(self.children[-1], str):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
# Remove the last child if it is only whitespace. This can result in
# incorrect DOM representations since whitespace between inline tags
# like <span> is significant.
if isinstance(self.children[-1], str):
if self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children:
if isinstance(children[-1], str):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, str):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name') or self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
# An attribute without a value is the same as an attribute with a
# value that equals the attribute's name:
# <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
return self.children == element.children
def __hash__(self):
return hash((self.name,) + tuple(a for a in self.attributes))
def _count(self, element, count=True):
if not isinstance(element, str):
if self == element:
return 1
if isinstance(element, RootElement):
if self.children == element.children:
return 1
i = 0
for child in self.children:
# If child is text content and element is also text content, then
# do a simple "text" in "text" containment check.
if isinstance(child, str):
if isinstance(element, str):
if count:
i += child.count(element)
elif element in child:
return 1
else:
i += child._count(element, count=count)
if not count and i:
return i
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(str(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += '>'
return output
def __repr__(self):
return str(self)
class RootElement(Element):
def __init__(self):
super().__init__(None, ())
def __str__(self):
return ''.join(str(c) for c in self.children)
class HTMLParseError(Exception):
pass
class Parser(HTMLParser):
SELF_CLOSING_TAGS = (
'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base',
'col',
)
def __init__(self):
super().__init__(convert_charrefs=False)
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, " ".join(sorted(value.split(" "))))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def handle_charref(self, name):
self.current.append('&%s;' % name)
def handle_entityref(self, name):
self.current.append('&%s;' % name)
def parse_html(html):
"""
Take a string that contains *valid* HTML and turn it into a Python object
structure that can be easily compared against other HTML on semantic
equivalence. Syntactical differences, like which type of quotation mark
is used on attributes, are ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
# Removing ROOT element if it's not necessary
if len(document.children) == 1:
if not isinstance(document.children[0], str):
document = document.children[0]
return document
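# Illustrative sketch: parsed fragments compare on semantic equality and
# support containment checks.
#
#     dom1 = parse_html('<p class="a" id="x">Hello</p>')
#     dom2 = parse_html("<p id='x' class='a'>Hello</p>")
#     assert dom1 == dom2
#     assert 'Hello' in dom1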
|
3f4717c5177dec9741abcb2fdb7b98c02cab4b4f772df0dd51fa2165ef2d81be | import collections
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from itertools import chain
from types import SimpleNamespace
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate:
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
return self.val == other or round(abs(self.val - other), self.places) == 0
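# Illustrative sketch: equality holds when the values agree to the given
# number of decimal places.
#
#     assert Approximate(1 / 3, places=5) == 0.33333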
class ContextList(list):
"""
A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, str):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super().__getitem__(key)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
return set(chain.from_iterable(d for subcontext in self for d in subcontext))
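# Illustrative sketch (hypothetical keys): key lookup searches the
# subcontexts in order.
#
#     contexts = ContextList([{'title': 'Home'}, {'user': 'ana'}])
#     contexts['user']            # 'ana'
#     contexts.get('missing')     # None
#     sorted(contexts.keys())     # ['title', 'user']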
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal that can be
intercepted by the test Client.
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
class _TestState:
pass
def setup_test_environment(debug=None):
"""
Perform global pre-test setup, such as installing the instrumented template
renderer and setting the email backend to the locmem email backend.
"""
if hasattr(_TestState, 'saved_data'):
# Executing this function twice would overwrite the saved values.
raise RuntimeError(
"setup_test_environment() was already called and can't be called "
"again without first calling teardown_test_environment()."
)
if debug is None:
debug = settings.DEBUG
saved_data = SimpleNamespace()
_TestState.saved_data = saved_data
saved_data.allowed_hosts = settings.ALLOWED_HOSTS
# Add the default host of the test client.
settings.ALLOWED_HOSTS = list(settings.ALLOWED_HOSTS) + ['testserver']
saved_data.debug = settings.DEBUG
settings.DEBUG = debug
saved_data.email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
saved_data.template_render = Template._render
Template._render = instrumented_test_render
mail.outbox = []
deactivate()
def teardown_test_environment():
"""
Perform any global post-test teardown, such as restoring the original
template renderer and restoring the email sending functions.
"""
saved_data = _TestState.saved_data
settings.ALLOWED_HOSTS = saved_data.allowed_hosts
settings.DEBUG = saved_data.debug
settings.EMAIL_BACKEND = saved_data.email_backend
Template._render = saved_data.template_render
del _TestState.saved_data
del mail.outbox
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, **kwargs):
"""Create the test databases."""
test_databases, mirrored_aliases = get_unique_databases_and_mirrors()
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get('TEST', {}).get('SERIALIZE', True),
)
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# Check that no database depends on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases
)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def get_unique_databases_and_mirrors():
"""
Figure out which databases actually need to be created.
Deduplicate entries in DATABASES that correspond to the same database or are
configured as test mirrors.
Return two values:
- test_databases: ordered mapping of signatures to (name, list of aliases)
where all aliases share the same underlying database.
- mirrored_aliases: mapping of mirror aliases to original aliases.
"""
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
test_databases = dependency_ordered(test_databases.items(), dependencies)
test_databases = collections.OrderedDict(test_databases)
return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
"""Destroy all the non-mirror databases."""
for connection, old_name, destroy in old_config:
if destroy:
if parallel > 1:
for index in range(parallel):
connection.creation.destroy_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
test_runner_class = test_runner_class or settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
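# Illustrative sketch: resolve and use the configured runner (the test label
# is hypothetical).
#
#     runner_class = get_runner(settings)  # settings.TEST_RUNNER by default
#     runner = runner_class(verbosity=1)
#     failures = runner.run_tests(['myapp.tests'])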
class TestContextDecorator:
"""
A base class that can either be used as a context manager during tests
or as a test function or unittest.TestCase subclass decorator to perform
temporary alterations.
`attr_name`: attribute assigned the return value of enable() if used as
a class decorator.
`kwarg_name`: keyword argument passing the return value of enable() if
used as a function decorator.
"""
def __init__(self, attr_name=None, kwarg_name=None):
self.attr_name = attr_name
self.kwarg_name = kwarg_name
def enable(self):
raise NotImplementedError
def disable(self):
raise NotImplementedError
def __enter__(self):
return self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def decorate_class(self, cls):
if issubclass(cls, TestCase):
decorated_setUp = cls.setUp
decorated_tearDown = cls.tearDown
def setUp(inner_self):
context = self.enable()
if self.attr_name:
setattr(inner_self, self.attr_name, context)
decorated_setUp(inner_self)
def tearDown(inner_self):
decorated_tearDown(inner_self)
self.disable()
cls.setUp = setUp
cls.tearDown = tearDown
return cls
raise TypeError('Can only decorate subclasses of unittest.TestCase')
def decorate_callable(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return func(*args, **kwargs)
return inner
def __call__(self, decorated):
if isinstance(decorated, type):
return self.decorate_class(decorated)
elif callable(decorated):
return self.decorate_callable(decorated)
raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
"""
Act as either a decorator or a context manager. If it's a decorator, take a
function and return a wrapped function. If it's a context manager, use it
with the ``with`` statement. In either event, entering/exiting are called
before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
super().__init__()
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = {
**test_func._overridden_settings,
**self.options,
}
def decorate_class(self, cls):
from django.test import SimpleTestCase
if not issubclass(cls, SimpleTestCase):
raise ValueError(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(cls)
return cls
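# Usage sketch (the setting values are illustrative):
#
#     from django.test import SimpleTestCase
#
#     @override_settings(LOGIN_URL='/other/login/')
#     class LoginTests(SimpleTestCase):
#         def test_nested_override(self):
#             with override_settings(LOGIN_URL='/nested/login/'):
#                 ...  # settings.LOGIN_URL == '/nested/login/' only inside this block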
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend, or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
super(override_settings, self).__init__()
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; accumulate the changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, str):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super().enable()
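# Usage sketch (the appended middleware path is hypothetical): each keyword is
# a list-type setting mapped to 'append', 'prepend', and/or 'remove' operations.
#
#     @modify_settings(MIDDLEWARE={
#         'append': 'myapp.middleware.ExampleMiddleware',
#         'remove': 'django.middleware.clickjacking.XFrameOptionsMiddleware',
#     })
#     class MiddlewareTests(SimpleTestCase):
#         ...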
class override_system_checks(TestContextDecorator):
"""
Act as a decorator. Override list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude the `auth`
    app, you also need to exclude its system checks.
"""
def __init__(self, new_checks, deployment_checks=None):
from django.core.checks.registry import registry
self.registry = registry
self.new_checks = new_checks
self.deployment_checks = deployment_checks
super().__init__()
def enable(self):
self.old_checks = self.registry.registered_checks
self.registry.registered_checks = set()
for check in self.new_checks:
self.registry.register(check, *getattr(check, 'tags', ()))
self.old_deployment_checks = self.registry.deployment_checks
if self.deployment_checks is not None:
self.registry.deployment_checks = set()
for check in self.deployment_checks:
self.registry.register(check, *getattr(check, 'tags', ()), deploy=True)
def disable(self):
self.registry.registered_checks = self.old_checks
self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""
    Try to do an 'xml-comparison' of want and got. Plain string comparison
doesn't always work because, for example, attribute ordering should not be
important. Ignore comment nodes and leading and trailing whitespace.
Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
return all(check_element(want, got) for want, got in zip(want_children, got_children))
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want = want.strip().replace('\\n', '\n')
got = got.strip().replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
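# Usage sketch: attribute order and insignificant whitespace don't affect the
# result, and fragments lacking an XML declaration are wrapped in a temporary
# <root> element before parsing.
#
#     compare_xml('<a x="1" y="2"/>', '<a  y="2" x="1" />')   # True
#     compare_xml('<a>text</a>', '<a>other</a>')              # False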
def str_prefix(s):
return s % {'_': ''}
class CaptureQueriesContext:
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
# Run any initialization queries if needed so that they won't be
# included as part of the count.
self.connection.ensure_connection()
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
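# Usage sketch (SomeModel is hypothetical; assumes a usable database
# connection). Each captured entry is a dict with 'sql' and 'time' keys.
#
#     from django.db import connection
#
#     with CaptureQueriesContext(connection) as ctx:
#         list(SomeModel.objects.all())
#     assert len(ctx) == 1
#     print(ctx.captured_queries[0]['sql'])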
class ignore_warnings(TestContextDecorator):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
super().__init__()
def enable(self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
def disable(self):
self.catch_warnings.__exit__(*sys.exc_info())
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received.
    Use unittest.assertLogs() if you only need Python 3 support. This
private API will be removed after Python 2 EOL in 2020 (#27753).
"""
calls = []
def replacement(msg, *args, **kwargs):
call = msg % args
calls.append((call, kwargs) if log_kwargs else call)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
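# Usage sketch: time.time() returns the frozen value only inside the block.
#
#     with freeze_time(123456.0):
#         assert time.time() == 123456.0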
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class override_script_prefix(TestContextDecorator):
"""Decorator or context manager to temporary override the script prefix."""
def __init__(self, prefix):
self.prefix = prefix
super().__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
class LoggingCaptureMixin:
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
"""
Act as either a decorator or a context manager to register models defined
in its wrapped context to an isolated registry.
The list of installed apps the isolated registry should contain must be
passed as arguments.
Two optional keyword arguments can be specified:
`attr_name`: attribute assigned the isolated registry if used as a class
decorator.
`kwarg_name`: keyword argument passing the isolated registry if used as a
function decorator.
"""
def __init__(self, *installed_apps, **kwargs):
self.installed_apps = installed_apps
super().__init__(**kwargs)
def enable(self):
self.old_apps = Options.default_apps
apps = Apps(self.installed_apps)
setattr(Options, 'default_apps', apps)
return apps
def disable(self):
setattr(Options, 'default_apps', self.old_apps)
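# Usage sketch ('myapp' and the model are hypothetical; assumes
# `from django.db import models`). Models defined inside the block are
# registered on a throwaway registry rather than the global apps registry.
#
#     @isolate_apps('myapp')
#     def test_temporary_model(self):
#         class TemporaryModel(models.Model):
#             class Meta:
#                 app_label = 'myapp'
#         self.assertEqual(TemporaryModel._meta.app_label, 'myapp')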
def tag(*tags):
"""Decorator to add tags to a test class or method."""
def decorator(obj):
if hasattr(obj, 'tags'):
obj.tags = obj.tags.union(tags)
else:
setattr(obj, 'tags', set(tags))
return obj
return decorator
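# Usage sketch: tagged tests can be selected or excluded on the command line,
# e.g. `manage.py test --tag=slow --exclude-tag=integration`.
#
#     @tag('slow', 'integration')
#     class SlowTests(SimpleTestCase):
#         @tag('smoke')
#         def test_startup(self):
#             ...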
|
05094ed295d94ce75c6e1816d6024b735e965b643be70c7fdb085e1b990287d5 | import os
import threading
import time
import warnings
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.db import connections, router
from django.db.utils import ConnectionRouter
from django.dispatch import Signal, receiver
from django.utils import timezone
from django.utils.formats import FORMAT_SETTINGS, reset_format_cache
from django.utils.functional import empty
template_rendered = Signal(providing_args=["template", "context"])
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = {'DATABASES'}
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
if kwargs['setting'] == 'CACHES':
from django.core.cache import caches
caches._caches = threading.local()
@receiver(setting_changed)
def update_installed_apps(**kwargs):
if kwargs['setting'] == 'INSTALLED_APPS':
# Rebuild any AppDirectoriesFinder instance.
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
# Rebuild management commands cache
from django.core.management import get_commands
get_commands.cache_clear()
# Rebuild get_app_template_dirs cache.
from django.template.utils import get_app_template_dirs
get_app_template_dirs.cache_clear()
# Rebuild translations cache.
from django.utils.translation import trans_real
trans_real._translations = {}
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
if kwargs['setting'] == 'TIME_ZONE':
# Reset process time zone
if hasattr(time, 'tzset'):
if kwargs['value']:
os.environ['TZ'] = kwargs['value']
else:
os.environ.pop('TZ', None)
time.tzset()
# Reset local time zone cache
timezone.get_default_timezone.cache_clear()
# Reset the database connections' time zone
if kwargs['setting'] in {'TIME_ZONE', 'USE_TZ'}:
for conn in connections.all():
try:
del conn.timezone
except AttributeError:
pass
try:
del conn.timezone_name
except AttributeError:
pass
conn.ensure_timezone()
@receiver(setting_changed)
def clear_routers_cache(**kwargs):
if kwargs['setting'] == 'DATABASE_ROUTERS':
router.routers = ConnectionRouter().routers
@receiver(setting_changed)
def reset_template_engines(**kwargs):
if kwargs['setting'] in {
'TEMPLATES',
'DEBUG',
'FILE_CHARSET',
'INSTALLED_APPS',
}:
from django.template import engines
try:
del engines.templates
except AttributeError:
pass
engines._templates = None
engines._engines = {}
from django.template.engine import Engine
Engine.get_default.cache_clear()
from django.forms.renderers import get_default_renderer
get_default_renderer.cache_clear()
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
if kwargs['setting'] == 'SERIALIZATION_MODULES':
from django.core import serializers
serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._default = None
trans_real._active = threading.local()
if kwargs['setting'] in {'LANGUAGES', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._translations = {}
trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def localize_settings_changed(**kwargs):
if kwargs['setting'] in FORMAT_SETTINGS or kwargs['setting'] == 'USE_THOUSAND_SEPARATOR':
reset_format_cache()
@receiver(setting_changed)
def file_storage_changed(**kwargs):
if kwargs['setting'] == 'DEFAULT_FILE_STORAGE':
from django.core.files.storage import default_storage
default_storage._wrapped = empty
@receiver(setting_changed)
def complex_setting_changed(**kwargs):
if kwargs['enter'] and kwargs['setting'] in COMPLEX_OVERRIDE_SETTINGS:
# Considering the current implementation of the signals framework,
# this stacklevel shows the line containing the override_settings call.
warnings.warn("Overriding setting %s can lead to unexpected behavior."
% kwargs['setting'], stacklevel=6)
@receiver(setting_changed)
def root_urlconf_changed(**kwargs):
if kwargs['setting'] == 'ROOT_URLCONF':
from django.urls import clear_url_caches, set_urlconf
clear_url_caches()
set_urlconf(None)
@receiver(setting_changed)
def static_storage_changed(**kwargs):
if kwargs['setting'] in {
'STATICFILES_STORAGE',
'STATIC_ROOT',
'STATIC_URL',
}:
from django.contrib.staticfiles.storage import staticfiles_storage
staticfiles_storage._wrapped = empty
@receiver(setting_changed)
def static_finders_changed(**kwargs):
if kwargs['setting'] in {
'STATICFILES_DIRS',
'STATIC_ROOT',
}:
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
@receiver(setting_changed)
def auth_password_validators_changed(**kwargs):
if kwargs['setting'] == 'AUTH_PASSWORD_VALIDATORS':
from django.contrib.auth.password_validation import get_default_password_validators
get_default_password_validators.cache_clear()
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
apps.clear_cache()
try:
from django.contrib.auth import get_user_model
UserModel = get_user_model()
except ImproperlyConfigured:
# Some tests set an invalid AUTH_USER_MODEL.
pass
else:
from django.contrib.auth import backends
backends.UserModel = UserModel
from django.contrib.auth import forms
forms.UserModel = UserModel
from django.contrib.auth.handlers import modwsgi
modwsgi.UserModel = UserModel
from django.contrib.auth.management.commands import changepassword
changepassword.UserModel = UserModel
from django.contrib.auth import views
views.UserModel = UserModel
|
156c60fe798b3cd1aa5f048cc3442f66c28d42c895047ad2b0d7e000b217a90d | #!/usr/bin/env python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
5b2f6aa4679c5991527f7f219de91c748a21dd0365d96530d881c3905fb2b066 | import functools
import sys
import threading
import warnings
from collections import Counter, OrderedDict, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from .config import AppConfig
class Apps:
"""
A registry that stores the configuration of installed applications.
It also keeps track of models, e.g. to provide reverse relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the master registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], 'apps'):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(OrderedDict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = OrderedDict()
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# Lock for thread-safe population.
self._lock = threading.RLock()
self.loading = False
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the master registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Load application configurations and models.
Import each application module and then each model module.
It is thread-safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# An RLock prevents other threads from entering this section. The
# compare and set operation below is atomic.
if self.loading:
# Prevent reentrant calls to avoid running AppConfig.ready()
# methods twice.
raise RuntimeError("populate() isn't reentrant")
self.loading = True
# Phase 1: initialize app configs and import app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label)
self.app_configs[app_config.label] = app_config
app_config.apps = self
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values())
duplicates = [
name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates))
self.apps_ready = True
# Phase 2: import models modules.
for app_config in self.app_configs.values():
app_config.import_models()
self.clear_cache()
self.models_ready = True
# Phase 3: run ready() methods of app configs.
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
def check_apps_ready(self):
"""Raise an exception if all apps haven't been imported yet."""
if not self.apps_ready:
from django.conf import settings
# If "not ready" is due to unconfigured settings, accessing
# INSTALLED_APPS raises a more helpful ImproperlyConfigured
# exception.
settings.INSTALLED_APPS
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""Raise an exception if all models haven't been imported yet."""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""Import applications and return an iterable of app configs."""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
        Import applications and return an app config for the given label.
Raise LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@functools.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Return a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(list(app_config.get_models(include_auto_created, include_swapped)))
return result
def get_model(self, app_label, model_name=None, require_ready=True):
"""
Return the model matching the given app_label and model_name.
As a shortcut, app_label may be in the form <app_label>.<model_name>.
model_name is case-insensitive.
Raise LookupError if no application exists with this label, or no
model exists with this name in the application. Raise ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
if require_ready:
self.check_models_ready()
else:
self.check_apps_ready()
if model_name is None:
app_label, model_name = app_label.split('.')
app_config = self.get_app_config(app_label)
if not require_ready and app_config.models is None:
app_config.import_models()
return app_config.get_model(model_name, require_ready=require_ready)
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (model.__name__ == app_models[model_name].__name__ and
model.__module__ == app_models[model_name].__module__):
warnings.warn(
"Model '%s.%s' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models." % (app_label, model_name),
RuntimeWarning, stacklevel=2)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s." %
(model_name, app_label, app_models[model_name], model))
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Check whether an application with this name exists in the registry.
app_name is the full name of the app e.g. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Return the app config for the inner application in case of nesting.
Return None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name):]
if subpath == '' or subpath[0] == '.':
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError(
"Model '%s.%s' not registered." % (app_label, model_name))
return model
@functools.lru_cache(maxsize=None)
def get_swappable_settings_name(self, to_string):
"""
For a given model string (e.g. "auth.User"), return the name of the
corresponding settings name if it refers to a swappable model. If the
referred model is not swappable, return None.
This method is decorated with lru_cache because it's performance
critical when it comes to migrations. Since the swappable settings don't
change after Django has loaded the settings, there is no reason to get
the respective settings attribute over and over again.
"""
for model in self.get_models(include_swapped=True):
swapped = model._meta.swapped
# Is this model swapped out for the model given by to_string?
if swapped and swapped == to_string:
return model._meta.swappable
# Is this model swappable and the one given by to_string?
if model._meta.swappable and model._meta.label == to_string:
return model._meta.swappable
return None
def set_available_apps(self, available):
"""
Restrict the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = {app_config.name for app_config in self.get_app_configs()}
if not available.issubset(installed):
raise ValueError(
"Available apps isn't a subset of installed apps, extra apps: %s"
% ", ".join(available - installed)
)
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict(
(label, app_config)
for label, app_config in self.app_configs.items()
if app_config.name in available)
self.clear_cache()
def unset_available_apps(self):
"""Cancel a previous call to set_available_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enable a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (e.g. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict()
self.apps_ready = self.models_ready = self.loading = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""Cancel a previous call to set_installed_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clear all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
            # Circumvent self.get_models() to prevent the cache from being
            # refilled. This particularly prevents an empty value from being
            # cached while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# Base case: no arguments, just execute the function.
if not model_keys:
function()
# Recursive case: take the head of model_keys, wait for the
# corresponding model class to be imported and registered, then apply
# that argument to the supplied function. Pass the resulting partial
# to lazy_model_operation() along with the remaining model args and
# repeat until all models are loaded and all arguments are applied.
else:
next_model, more_models = model_keys[0], model_keys[1:]
# This will be executed after the class corresponding to next_model
# has been imported and registered. The `func` attribute provides
# duck-type compatibility with partials.
def apply_next_model(model):
next_function = partial(apply_next_model.func, model)
self.lazy_model_operation(next_function, *more_models)
apply_next_model.func = function
# If the model has already been imported and registered, partially
# apply it to the function now. If not, add it to the list of
# pending operations for the model, where it will be executed with
# the model class as its sole argument once the model is ready.
try:
model_class = self.get_registered_model(*next_model)
except LookupError:
self._pending_operations[next_model].append(apply_next_model)
else:
apply_next_model(model_class)
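    # Usage sketch (app label and model name are hypothetical): the callback
    # runs immediately if 'myapp.book' is already registered, otherwise it is
    # queued and invoked as soon as the model class is prepared.
    #
    #     def connect_book_signals(book_model):
    #         ...
    #
    #     apps.lazy_model_operation(connect_book_signals, ('myapp', 'book'))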
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of Apps.register_model().
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
apps = Apps(installed_apps=None)
|
b74178c9c794e126f26cc79606fa6e13a46c19a10d9900956cd49d4ae5e210cb | from .config import AppConfig
from .registry import apps
__all__ = ['AppConfig', 'apps']
|
99ec968d72edd2a80f7ea335e0a19a5ac65a91b81afb0b875ecbb68241f243eb | import os
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig:
"""Class representing a Django application and its configuration."""
def __init__(self, app_name, app_module):
# Full Python path to the application e.g. 'django.contrib.admin'.
self.name = app_name
# Root module for the application e.g. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.py'>.
self.module = app_module
# Reference to the Apps registry that holds this AppConfig. Set by the
# registry when it registers the AppConfig instance.
self.apps = None
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application e.g. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application e.g. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory e.g.
# '/path/to/django/contrib/admin'.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models e.g. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.py'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python's _NamespacePath doesn't support
# indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
else:
# For unknown reasons, sometimes the list returned by __path__
# contains duplicates that must be removed (#25246).
paths = list(set(paths))
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return paths[0]
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
try:
app_module = import_module(app_name)
except ImportError:
raise ImproperlyConfigured(
"Cannot import '%s'. Check that '%s.%s.name' is correct." % (
app_name, mod_path, cls_name,
)
)
# Entry is a path to an app config class.
return cls(app_name, app_module)
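    # Illustrative INSTALLED_APPS entries handled by create() (paths are
    # hypothetical): an app module, which may point at a config class through
    # default_app_config, or a dotted path to the AppConfig subclass itself.
    #
    #     INSTALLED_APPS = [
    #         'myapp',                         # plain app module
    #         'otherapp.apps.OtherAppConfig',  # explicit app config class
    #     ]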
def get_model(self, model_name, require_ready=True):
"""
Return the model with the given case-insensitive model_name.
Raise LookupError if no model exists with this name.
"""
if require_ready:
self.apps.check_models_ready()
else:
self.apps.check_apps_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Return an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.apps.check_models_ready()
for model in self.models.values():
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
self.models = self.apps.all_models[self.label]
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
|
0c6740b860b4b756cc8eef62f81f69fe0a8f8112051ed2d74dd23134a5851acc | from django.views.generic.base import View
__all__ = ['View']
|
475b52e394fcaf93fa5daf2e9b31fa45c3b8767848cc79e548a144a48a1ba47c | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import posixpath
import re
from pathlib import Path
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils._os import safe_join
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(path).lstrip('/')
fullpath = Path(safe_join(document_root, path))
if fullpath.is_dir():
if show_indexes:
return directory_index(path, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not fullpath.exists():
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = fullpath.stat()
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(str(fullpath))
content_type = content_type or 'application/octet-stream'
response = FileResponse(fullpath.open('rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<meta http-equiv="Content-Language" content="en-us">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
c = Context()
else:
c = {}
files = []
for f in fullpath.iterdir():
if not f.name.startswith('.'):
url = str(f.relative_to(fullpath))
if f.is_dir():
url += '/'
files.append(url)
c.update({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
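# Usage sketch: the result is False ("not modified") only when the header
# parses and neither the size nor the modification time has changed.
#
#     was_modified_since('Wed, 21 Oct 2015 07:28:00 GMT; length=10',
#                        mtime=1445412480, size=10)   # False: still current
#     was_modified_since(None)                        # True: always re-send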
|
b375d2f3228b24e61a47b47fafc73edc5dc677c9e31598040ba3042b854c002e | import itertools
import json
import os
import re
from urllib.parse import unquote
from django.apps import apps
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template import Context, Engine
from django.urls import translate_url
from django.utils.formats import get_format
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language,
)
from django.utils.translation.trans_real import DjangoTranslation
from django.views.generic import View
LANGUAGE_QUERY_PARAMETER = 'language'
def set_language(request):
"""
Redirect to a given URL while setting the chosen language in the session
(if enabled) and in a cookie. The URL and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if ((next or not request.is_ajax()) and
not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure())):
next = request.META.get('HTTP_REFERER')
next = next and unquote(next) # HTTP_REFERER may be encoded.
if not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure()):
next = '/'
response = HttpResponseRedirect(next) if next else HttpResponse(status=204)
if request.method == 'POST':
lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
if lang_code and check_for_language(lang_code):
if next:
next_trans = translate_url(next, lang_code)
if next_trans != next:
response = HttpResponseRedirect(next_trans)
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
)
return response
def get_formats():
"""Return all formats strings required for i18n to work."""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
return {attr: get_format(attr) for attr in FORMAT_SETTINGS}
js_catalog_template = r"""
{% autoescape off %}
(function(globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function(n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
{% endif %}
/* gettext library */
django.catalog = django.catalog || {};
{% if catalog_str %}
var newcatalog = {{ catalog_str }};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
{% endif %}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
{% endautoescape %}
"""
class JavaScriptCatalog(View):
"""
Return the selected language catalog as a JavaScript library.
Receive the list of packages to check for translations in the `packages`
kwarg either from the extra dictionary passed to the url() function or as a
plus-sign delimited string from the request. Default is 'django.conf'.
You can override the gettext domain for this view, but usually you don't
want to do that as JavaScript messages go to the djangojs domain. This
might be needed if you deliver your JavaScript source from Django templates.
"""
domain = 'djangojs'
packages = None
def get(self, request, *args, **kwargs):
locale = get_language()
domain = kwargs.get('domain', self.domain)
# If packages are not provided, default to all installed packages, as
# DjangoTranslation without localedirs harvests them all.
packages = kwargs.get('packages', '')
packages = packages.split('+') if packages else self.packages
paths = self.get_paths(packages) if packages else None
self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths)
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def get_paths(self, packages):
allowable_packages = {app_config.name: app_config for app_config in apps.get_app_configs()}
app_configs = [allowable_packages[p] for p in packages if p in allowable_packages]
if len(app_configs) < len(packages):
excluded = [p for p in packages if p not in allowable_packages]
raise ValueError(
'Invalid package(s) provided to JavaScriptCatalog: %s' % ','.join(excluded)
)
# paths of requested packages
return [os.path.join(app.path, 'locale') for app in app_configs]
@property
def _num_plurals(self):
"""
Return the number of plurals for this catalog language, or 2 if no
plural string is available.
"""
match = re.search(r'nplurals=\s*(\d+)', self._plural_string or '')
if match:
return int(match.groups()[0])
return 2
@property
def _plural_string(self):
"""
Return the plural string (including nplurals) for this catalog language,
or None if no plural string is available.
"""
if '' in self.translation._catalog:
for line in self.translation._catalog[''].split('\n'):
if line.startswith('Plural-Forms:'):
return line.split(':', 1)[1].strip()
return None
def get_plural(self):
plural = self._plural_string
if plural is not None:
# This should be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
return plural
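    # Illustrative example: for a catalog whose metadata contains
    # "Plural-Forms: nplurals=2; plural=(n != 1);", _num_plurals is 2 and
    # get_plural() returns "(n != 1)", which becomes the body of
    # django.pluralidx() in js_catalog_template above.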
def get_catalog(self):
pdict = {}
num_plurals = self._num_plurals
catalog = {}
trans_cat = self.translation._catalog
trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {}
seen_keys = set()
for key, value in itertools.chain(trans_cat.items(), trans_fallback_cat.items()):
if key == '' or key in seen_keys:
continue
if isinstance(key, str):
catalog[key] = value
elif isinstance(key, tuple):
msgid, cnt = key
pdict.setdefault(msgid, {})[cnt] = value
else:
raise TypeError(key)
seen_keys.add(key)
for k, v in pdict.items():
catalog[k] = [v.get(i, '') for i in range(num_plurals)]
return catalog
def get_context_data(self, **kwargs):
return {
'catalog': self.get_catalog(),
'formats': get_formats(),
'plural': self.get_plural(),
}
def render_to_response(self, context, **response_kwargs):
def indent(s):
return s.replace('\n', '\n ')
template = Engine().from_string(js_catalog_template)
context['catalog_str'] = indent(
json.dumps(context['catalog'], sort_keys=True, indent=2)
) if context['catalog'] else None
context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2))
return HttpResponse(template.render(Context(context)), 'text/javascript; charset="utf-8"')
class JSONCatalog(JavaScriptCatalog):
"""
Return the selected language catalog as a JSON object.
Receive the same parameters as JavaScriptCatalog and return a response
with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
def render_to_response(self, context, **response_kwargs):
return JsonResponse(context)
|
43bab1b9c8d41cfa109a38d3fe9f777abc465cd68d2b64c73b8399ae14ccc3e9 | from django.http import (
HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotFound,
HttpResponseServerError,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.views.decorators.csrf import requires_csrf_token
ERROR_404_TEMPLATE_NAME = '404.html'
ERROR_403_TEMPLATE_NAME = '403.html'
ERROR_400_TEMPLATE_NAME = '400.html'
ERROR_500_TEMPLATE_NAME = '500.html'
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, exception, template_name=ERROR_404_TEMPLATE_NAME):
"""
Default 404 handler.
Templates: :template:`404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/')
exception
The message from the exception which triggered the 404 (if one was
supplied), or the exception class name
"""
exception_repr = exception.__class__.__name__
# Try to get an "interesting" exception message, if any (and not the ugly
# Resolver404 dictionary)
try:
message = exception.args[0]
except (AttributeError, IndexError):
pass
else:
if isinstance(message, str):
exception_repr = message
context = {
'request_path': request.path,
'exception': exception_repr,
}
try:
template = loader.get_template(template_name)
body = template.render(context, request)
content_type = None # Django will use DEFAULT_CONTENT_TYPE
except TemplateDoesNotExist:
if template_name != ERROR_404_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
template = Engine().from_string(
'<h1>Not Found</h1>'
'<p>The requested URL {{ request_path }} was not found on this server.</p>')
body = template.render(Context(context))
content_type = 'text/html'
return HttpResponseNotFound(body, content_type=content_type)
@requires_csrf_token
def server_error(request, template_name=ERROR_500_TEMPLATE_NAME):
"""
500 error handler.
Templates: :template:`500.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_500_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseServerError('<h1>Server Error (500)</h1>', content_type='text/html')
return HttpResponseServerError(template.render())
@requires_csrf_token
def bad_request(request, exception, template_name=ERROR_400_TEMPLATE_NAME):
"""
400 error handler.
Templates: :template:`400.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_400_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseBadRequest('<h1>Bad Request (400)</h1>', content_type='text/html')
# No exception content is passed to the template, to not disclose any sensitive information.
return HttpResponseBadRequest(template.render())
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):
"""
Permission denied (403) handler.
Templates: :template:`403.html`
Context: None
    If the template does not exist, an HTTP 403 response containing the text
    "403 Forbidden" (as per RFC 7231) will be returned.
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_403_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseForbidden('<h1>403 Forbidden</h1>', content_type='text/html')
return HttpResponseForbidden(
template.render(request=request, context={'exception': str(exception)})
)
|
91977f05ba59f09552b0522eb65e2dc5b3d3110691a9499eb88748a1662ee7d6 | import functools
import re
import sys
import types
from pathlib import Path
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import Resolver404, resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
debug=True,
libraries={'i18n': 'django.templatetags.i18n'},
)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.IGNORECASE)
CLEANSED_SUBSTITUTE = '********************'
CURRENT_DIR = Path(__file__).parent
class CallableSettingWrapper:
"""
Object to wrap callable appearing in settings.
* Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes
      (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""
Cleanse an individual setting key/value of sensitive content. If the value
is a dictionary, recursively cleanse the keys in that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
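# Behavioural sketch of cleanse_setting() above (example values assumed for illustration):
#   cleanse_setting('SECRET_KEY', 'abc123')  -> CLEANSED_SUBSTITUTE   ('KEY'/'SECRET' match HIDDEN_SETTINGS)
#   cleanse_setting('DEBUG', True)           -> True
#   cleanse_setting('DATABASES', {'PASSWORD': 'x', 'NAME': 'db'})
#       -> {'PASSWORD': CLEANSED_SUBSTITUTE, 'NAME': 'db'}            (dict values cleansed recursively)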
def get_safe_settings():
"""
Return a dictionary of the settings module with values of sensitive
settings replaced with stars (*********).
"""
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@functools.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter:
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replace the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replace the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k in cleansed:
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. isinstance checks that lazy
# MultiValueDicts will have a return value.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replace the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name in tb_frame.f_locals:
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
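# Sketch of how the annotations consumed above are typically produced (the
# decorators live in django.views.decorators.debug; the view and parameter
# names here are hypothetical):
#   @sensitive_variables('password', 'token')
#   @sensitive_post_parameters('password')
#   def login_view(request):
#       ...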
class ExceptionReporter:
"""Organize and coordinate reporting on exceptions."""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, v))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = str(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = '[unable to retrieve the current user]'
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'user_str': user_str,
'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
if self.request is not None:
c['request_GET_items'] = self.request.GET.items()
c['request_FILES_items'] = self.request.FILES.items()
c['request_COOKIES_items'] = self.request.COOKIES.items()
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = str(self.exc_value)
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"""Return HTML version of debug 500 HTTP error page."""
with Path(CURRENT_DIR, 'templates', 'technical_500.html').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"""Return plain text version of debug 500 HTTP error page."""
with Path(CURRENT_DIR, 'templates', 'technical_500.txt').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Return context_lines before and after lineno from file.
Return (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [str(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
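    # Shape of the return value above (sketch): with lineno=42 and context_lines=7
    # it returns (35, source[35:42], source[42], source[43:49]) -- the zero-based
    # line where the pre-context starts, the lines before, the offending line
    # itself, and the lines after.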
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = '<source code not available>'
post_context = []
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def technical_404_response(request, exception):
"""Create a technical 404 error response. `exception` is the Http404."""
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried or ( # empty URLconf
request.path == '/' and
len(tried) == 1 and # default URLconf
len(tried[0]) == 1 and
getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
)):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
with Path(CURRENT_DIR, 'templates', 'technical_404.html').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': str(exception),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"""Create an empty URLconf 404 error response."""
with Path(CURRENT_DIR, 'templates', 'default_urlconf.html').open() as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context({
'version': get_docs_version(),
})
return HttpResponse(t.render(c), content_type='text/html')
|
4ce9d7112e70d01994bf2abbc21181e99e83b38f91a2d2156a698518df3c50a6 | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.translation import gettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline template, as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; color:#000; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
<p>{{ no_referer3 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function passes a <code>request</code> to the template's <a
href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
method.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
<li>The form has a valid CSRF token. After logging in in another browser
tab or hitting the back button after a login, you may need to reload the
page with the form, because the token is rotated after a login.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html"
def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
c = {
'title': _("Forbidden"),
'main': _("CSRF verification failed. Request aborted."),
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_referer1': _(
"You are seeing this message because this HTTPS site requires a "
"'Referer header' to be sent by your Web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."),
'no_referer2': _(
"If you have configured your browser to disable 'Referer' headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for 'same-origin' requests."),
'no_referer3': _(
"If you are using the <meta name=\"referrer\" "
"content=\"no-referrer\"> tag or including the 'Referrer-Policy: "
"no-referrer' header, please remove them. The CSRF protection "
"requires the 'Referer' header to do strict referer checking. If "
"you're concerned about privacy, use alternatives like "
"<a rel=\"noreferrer\" ...> for links to third-party sites."),
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'no_cookie1': _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."),
'no_cookie2': _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for 'same-origin' "
"requests."),
'DEBUG': settings.DEBUG,
'docs_version': get_docs_version(),
'more': _("More information is available with DEBUG=True."),
}
try:
t = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name == CSRF_FAILURE_TEMPLATE_NAME:
# If the default template doesn't exist, use the string template.
t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
c = Context(c)
else:
# Raise if a developer-specified template doesn't exist.
raise
return HttpResponseForbidden(t.render(c), content_type='text/html')
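# Illustrative override (a sketch; the dotted path is hypothetical): a project can
# replace this view by setting
#   CSRF_FAILURE_VIEW = 'myproject.views.csrf_failure'
# as long as the replacement accepts the same (request, reason="") signature.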
|
07a946ad3c5b288cc8eeff05a370c4b5f720dcde9c6ee3b905e3edb8ea1d25f2 | """
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
import os
import time
import warnings
from pathlib import Path
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""Return the value of a setting and cache it in self.__dict__."""
if self._wrapped is empty:
self._setup(name)
val = getattr(self._wrapped, name)
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
Set the value of setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
if name == '_wrapped':
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
self.__dict__.pop(name, None)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
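    # Minimal standalone-usage sketch (not executed here): scripts that don't set
    # DJANGO_SETTINGS_MODULE can configure settings manually once, e.g.
    #   from django.conf import settings
    #   if not settings.configured:
    #       settings.configure(DEBUG=True, INSTALLED_APPS=[])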
@property
def configured(self):
"""Return True if the settings have already been configured."""
return self._wrapped is not empty
class Settings:
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if self.is_overridden('DEFAULT_CONTENT_TYPE'):
warnings.warn('The DEFAULT_CONTENT_TYPE setting is deprecated.', RemovedInDjango30Warning)
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = Path('/usr/share/zoneinfo')
zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split('/'))
if zoneinfo_root.exists() and not zone_info_file.exists():
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder:
"""Holder for user configured settings."""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
if name == 'DEFAULT_CONTENT_TYPE':
warnings.warn('The DEFAULT_CONTENT_TYPE setting is deprecated.', RemovedInDjango30Warning)
super().__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super().__delattr__(name)
def __dir__(self):
return sorted(
s for s in list(self.__dict__) + dir(self.default_settings)
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return deleted or set_locally or set_on_default
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
|
1f6a048afb46939936fb269646541f37717dc4ca5756844f51384418e5cf9567 | """
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (e.g. "myproject.locale" so that myproject/locale/en/formats.py etc. can be used)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
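# Example value when such a trusted proxy is in place (sketch only):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')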
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
bffcefa3c6000fb5baa0f0c21dee31b1a3daef36bcbe3ab3f0728749b14fd811 | "Functions that help with dynamically creating decorators for views."
# For backwards compatibility in Django 2.0.
from contextlib import ContextDecorator # noqa
from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
class classonlymethod(classmethod):
def __get__(self, instance, cls=None):
if instance is not None:
raise AttributeError("This method is available only on the class, not on instances.")
return super().__get__(instance, cls)
def _update_method_wrapper(_wrapper, decorator):
# _multi_decorate()'s bound_method isn't available in this scope. Cheat by
# using it on a dummy function.
@decorator
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
def _multi_decorate(decorators, method):
"""
Decorate `method` with one or more function decorators. `decorators` can be
a single decorator or an iterable of decorators.
"""
if hasattr(decorators, '__iter__'):
# Apply a list/tuple of decorators if 'decorators' is one. Decorator
# functions are applied so that the call order is the same as the
# order in which they appear in the iterable.
decorators = decorators[::-1]
else:
decorators = [decorators]
def _wrapper(self, *args, **kwargs):
# bound_method has the signature that 'decorator' expects i.e. no
# 'self' argument.
bound_method = method.__get__(self, type(self))
for dec in decorators:
bound_method = dec(bound_method)
return bound_method(*args, **kwargs)
# Copy any attributes that a decorator adds to the function it decorates.
for dec in decorators:
_update_method_wrapper(_wrapper, dec)
# Preserve any existing attributes of 'method', including the name.
update_wrapper(_wrapper, method)
return _wrapper
def method_decorator(decorator, name=''):
"""
Convert a function decorator into a method decorator
"""
# 'obj' can be a class or a function. If 'obj' is a function at the time it
# is passed to _dec, it will eventually be a method of the class it is
# defined on. If 'obj' is a class, the 'name' is required to be the name
# of the method that will be decorated.
def _dec(obj):
if not isinstance(obj, type):
return _multi_decorate(decorator, obj)
if not (name and hasattr(obj, name)):
raise ValueError(
"The keyword argument `name` must be the name of a method "
"of the decorated class: %s. Got '%s' instead." % (obj, name)
)
method = getattr(obj, name)
if not callable(method):
raise TypeError(
"Cannot decorate '%s' as it isn't a callable attribute of "
"%s (%s)." % (name, obj, method)
)
_wrapper = _multi_decorate(decorator, method)
setattr(obj, name, _wrapper)
return obj
# Don't worry about making _dec look similar to a list/tuple as it's rather
# meaningless.
if not hasattr(decorator, '__iter__'):
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
obj = decorator if hasattr(decorator, '__name__') else decorator.__class__
_dec.__name__ = 'method_decorator(%s)' % obj.__name__
return _dec
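# Illustrative usage (a sketch, not part of this module): applying a function
# decorator to a class-based view's dispatch method by name. PaymentWebhookView
# is hypothetical; `View` would come from django.views.generic.
#   from django.views.decorators.csrf import csrf_exempt
#
#   @method_decorator(csrf_exempt, name='dispatch')
#   class PaymentWebhookView(View):
#       ...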
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but return a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), return a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
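# Illustrative usage (sketch): django.views.decorators.gzip builds its decorator this way,
#   gzip_page = decorator_from_middleware(GZipMiddleware)
# where GZipMiddleware is django.middleware.gzip.GZipMiddleware.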
# Unused, for backwards compatibility in Django 2.0.
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This was required as a workaround for http://bugs.python.org/issue3445
under Python 2.
"""
return WRAPPER_ASSIGNMENTS
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception as e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(response, 'render') and callable(response.render):
if hasattr(middleware, 'process_template_response'):
response = middleware.process_template_response(request, response)
# Defer running of process_response until after the template
# has been rendered:
if hasattr(middleware, 'process_response'):
def callback(response):
return middleware.process_response(request, response)
response.add_post_render_callback(callback)
else:
if hasattr(middleware, 'process_response'):
return middleware.process_response(request, response)
return response
return _wrapped_view
return _decorator
return _make_decorator
class classproperty:
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, cls=None):
return self.fget(cls)
def getter(self, method):
self.fget = method
return self
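# Illustrative usage of classproperty (sketch, names hypothetical):
#   class Config:
#       @classproperty
#       def label(cls):
#           return cls.__name__.lower()
#   Config.label  # -> 'config', computed from the class rather than an instance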
|
81ea02ddfb5e88f01e037b690b82d0d60f7ebdf60bab9e3e9174c246602b15d8 | # Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
import re
import time as ttime
from datetime import (
date as real_date, datetime as real_datetime, time as real_time,
)
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
@classmethod
def combine(cls, date, time):
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second,
time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
class time(real_time):
pass
def new_date(d):
"Generate a safe date from a datetime.date object."
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while True:
i = text.find(substr, i)
if i == -1:
break
sites.append(i)
i += 1
return sites
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
        raise TypeError("strftime of dates before 1900 does not handle " + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = ttime.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = ttime.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return s
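# Worked example (sketch): strftime(date(1850, 8, 2), "%Y/%m/%d") shifts 1850 into
# the 28-year cycle around 2000 (same weekday/leap-year pattern), formats that
# mapped year with time.strftime(), then substitutes '1850' back into every
# position where both the mapped year and the mapped year + 28 appear in the output.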
|
1c1d9118789c16f582796ddcd83dabf4e1581310d0535f33d39eb9930cf4c733 | # Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import signal
import subprocess
import sys
import time
import traceback
import _thread
from django.apps import apps
from django.conf import settings
from django.core.signals import request_finished
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading # NOQA
except ImportError:
pass
try:
import termios
except ImportError:
termios = None
USE_INOTIFY = False
try:
# Test whether inotify is enabled and likely to work
import pyinotify
fd = pyinotify.INotifyWrapper.create().inotify_init()
if fd >= 0:
USE_INOTIFY = True
os.close(fd)
except ImportError:
pass
RUN_RELOADER = True
FILE_MODIFIED = 1
I18N_MODIFIED = 2
_mtimes = {}
_win = (sys.platform == "win32")
_exception = None
_error_files = []
_cached_modules = set()
_cached_filenames = []
def gen_filenames(only_new=False):
"""
Return a list of filenames referenced in sys.modules and translation files.
"""
# N.B. ``list(...)`` is needed, because this runs in parallel with
# application code which might be mutating ``sys.modules``, and this will
# fail with RuntimeError: cannot mutate dictionary while iterating
global _cached_modules, _cached_filenames
module_values = set(sys.modules.values())
_cached_filenames = clean_files(_cached_filenames)
if _cached_modules == module_values:
# No changes in module list, short-circuit the function
if only_new:
return []
else:
return _cached_filenames + clean_files(_error_files)
new_modules = module_values - _cached_modules
new_filenames = clean_files(
[filename.__file__ for filename in new_modules
if hasattr(filename, '__file__')])
if not _cached_filenames and settings.USE_I18N:
# Add the names of the .mo files that can be generated
# by compilemessages management command to the list of files watched.
basedirs = [os.path.join(os.path.dirname(os.path.dirname(__file__)),
'conf', 'locale'),
'locale']
for app_config in reversed(list(apps.get_app_configs())):
basedirs.append(os.path.join(app_config.path, 'locale'))
basedirs.extend(settings.LOCALE_PATHS)
basedirs = [os.path.abspath(basedir) for basedir in basedirs
if os.path.isdir(basedir)]
for basedir in basedirs:
for dirpath, dirnames, locale_filenames in os.walk(basedir):
for filename in locale_filenames:
if filename.endswith('.mo'):
new_filenames.append(os.path.join(dirpath, filename))
_cached_modules = _cached_modules.union(new_modules)
_cached_filenames += new_filenames
if only_new:
return new_filenames + clean_files(_error_files)
else:
return _cached_filenames + clean_files(_error_files)
def clean_files(filelist):
filenames = []
for filename in filelist:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if os.path.exists(filename):
filenames.append(filename)
return filenames
def reset_translations():
import gettext
from django.utils.translation import trans_real
gettext._translations = {}
trans_real._translations = {}
trans_real._default = None
trans_real._active = threading.local()
def inotify_code_changed():
"""
Check for changed code using inotify. After being called
it blocks until a change event has been fired.
"""
class EventHandler(pyinotify.ProcessEvent):
modified_code = None
def process_default(self, event):
if event.path.endswith('.mo'):
EventHandler.modified_code = I18N_MODIFIED
else:
EventHandler.modified_code = FILE_MODIFIED
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, EventHandler())
def update_watch(sender=None, **kwargs):
if sender and getattr(sender, 'handles_files', False):
# No need to update watches when request serves files.
# (sender is supposed to be a django.core.handlers.BaseHandler subclass)
return
mask = (
pyinotify.IN_MODIFY |
pyinotify.IN_DELETE |
pyinotify.IN_ATTRIB |
pyinotify.IN_MOVED_FROM |
pyinotify.IN_MOVED_TO |
pyinotify.IN_CREATE |
pyinotify.IN_DELETE_SELF |
pyinotify.IN_MOVE_SELF
)
for path in gen_filenames(only_new=True):
wm.add_watch(path, mask)
# New modules may get imported when a request is processed.
request_finished.connect(update_watch)
# Block until an event happens.
update_watch()
notifier.check_events(timeout=None)
notifier.read_events()
notifier.process_events()
notifier.stop()
# If we are here the code must have changed.
return EventHandler.modified_code
def code_changed():
global _mtimes, _win
for filename in gen_filenames():
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
try:
del _error_files[_error_files.index(filename)]
except ValueError:
pass
return I18N_MODIFIED if filename.endswith('.mo') else FILE_MODIFIED
return False
def check_errors(fn):
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[1]
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
ensure_echo_on()
if USE_INOTIFY:
fn = inotify_code_changed
else:
fn = code_changed
while RUN_RELOADER:
change = fn()
if change == FILE_MODIFIED:
sys.exit(3) # force reload
elif change == I18N_MODIFIED:
reset_translations()
time.sleep(1)
def restart_with_reloader():
import django.__main__
while True:
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.argv[0] == django.__main__.__file__:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
else:
args += sys.argv
new_environ = {**os.environ, 'RUN_MAIN': 'true'}
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
_thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
exit_code = restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
wrapped_main_func = check_errors(main_func)
python_reloader(wrapped_main_func, args, kwargs)
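# Minimal usage sketch (hypothetical inner_run function, not part of this
# module): the parent process loops in restart_with_reloader(), re-spawning
# the command with RUN_MAIN=true whenever the child exits with code 3, while
# the child runs inner_run in a thread and watches source files for changes.
#
# def inner_run():
#     ...  # start the actual development server here
#
# main(inner_run)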
|
bc3769edf1acb5e78b75685261a89dade62c94bd9f10f2e999bf93b3d7048482 | import html.entities
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
from django.utils.safestring import SafeText, mark_safe
from django.utils.translation import gettext as _, gettext_lazy, pgettext
@keep_lazy_text
def capfirst(x):
"""Capitalize the first letter of a string."""
return x and str(x)[0].upper() + str(x)[1:]
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.S)
re_chars = re.compile(r'<.*?>|(.)', re.S)
re_tag = re.compile(r'<(/)?(\S+?)(?:(\s*/)|\s.*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
    Preserve all white space except that added line breaks consume the space
    on which they break the line.
Don't wrap long words, thus the output text may have lines longer than
``width``.
"""
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith('\n') and width + 1 or width), width)
while len(line) > max_width:
space = line[:max_width + 1].rfind(' ') + 1
if space == 0:
space = line.find(' ') + 1
if space == 0:
yield line
line = ''
break
yield '%s\n' % line[:space - 1]
line = line[space:]
max_width = min((line.endswith('\n') and width + 1 or width), width)
if line:
yield line
return ''.join(_generator())
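# Illustrative sketch (not part of the original module): line breaks are only
# inserted at existing spaces, and the space a break lands on is consumed:
# >>> wrap('a b c d', 3)
# 'a b\nc d'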
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super().__init__(lambda: str(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s...')
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Return the text truncated to be no longer than the specified number
of characters.
`truncate` specifies what should be used to notify that the string has
been truncated, defaulting to a translatable string of an ellipsis
(...).
"""
self._setup()
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
def _text_chars(self, length, truncate, text, truncate_len):
"""Truncate a string after a certain number of chars."""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncate a string after a certain number of words. `truncate` specifies
what should be used to notify that the string has been truncated,
defaulting to ellipsis (...).
"""
self._setup()
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncate a string after a certain number of words.
Strip newlines in the string.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncate HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Close opened tags if they were correctly closed in the given HTML.
Preserve newlines in the HTML.
"""
if words and length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or current_len >= truncate_len:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
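# Illustrative sketch (not part of the original module; assumes Django
# settings are configured so the translation machinery can resolve the
# default "..." suffix):
# >>> Truncator('The quick brown fox').words(2)
# 'The quick...'
# >>> Truncator('The quick brown fox').chars(10)
# 'The qui...'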
@keep_lazy_text
def get_valid_filename(s):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if not list_:
return ''
if len(list_) == 1:
return str(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join(str(i) for i in list_[:-1]), str(last_word), str(list_[-1])
)
@keep_lazy_text
def normalize_newlines(text):
"""Normalize CRLF and CR newlines to just LF."""
return re_newlines.sub('\n', str(text))
@keep_lazy_text
def phone2numeric(phone):
"""Convert a phone number with letters into its numeric equivalent."""
char2number = {
'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
}
return ''.join(char2number.get(c, c) for c in phone.lower())
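# Illustrative sketch (not part of the original module):
# >>> phone2numeric('800-COLLECT')
# '800-2655328'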
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
with GzipFile(mode='wb', compresslevel=6, fileobj=zbuf, mtime=0) as zfile:
zfile.write(s)
return zbuf.getvalue()
class StreamingBuffer:
def __init__(self):
self.vals = []
def write(self, val):
self.vals.append(val)
def read(self):
if not self.vals:
return b''
ret = b''.join(self.vals)
self.vals = []
return ret
def flush(self):
return
def close(self):
return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
for bit in smart_split_re.finditer(str(text)):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return chr(c)
except ValueError:
return match.group(0)
else:
try:
return chr(html.entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
return _entity_re.sub(_replace_entity, str(text))
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy(str, SafeText)
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value))
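# Illustrative sketch (not part of the original module):
# >>> slugify("Django's -- Cool!")
# 'djangos-cool'
# >>> slugify('Ich ♥ Django', allow_unicode=True)
# 'ich-django'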
def camel_case_to_spaces(value):
"""
Split CamelCase and convert to lower case. Strip surrounding whitespace.
"""
return re_camel_case.sub(r' \1', value).strip().lower()
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, str)
|
d4a35e4e0a013e765814cae3eb43f44ea95714a5111b94e4f72a6f119a242d58 | """Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils.timezone import get_fixed_timezone, utc
date_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
time_re = re.compile(
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
datetime_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
standard_duration_re = re.compile(
r'^'
r'(?:(?P<days>-?\d+) (days?, )?)?'
r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
r'(?:(?P<minutes>-?\d+):)?'
r'(?P<seconds>-?\d+)'
r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by
# timedelta
iso8601_duration_re = re.compile(
r'^(?P<sign>[-+]?)'
r'P'
r'(?:(?P<days>\d+(.\d+)?)D)?'
r'(?:T'
r'(?:(?P<hours>\d+(.\d+)?)H)?'
r'(?:(?P<minutes>\d+(.\d+)?)M)?'
r'(?:(?P<seconds>\d+(.\d+)?)S)?'
r')?'
r'$'
)
# Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The
# year-month and mixed intervals cannot be converted to a timedelta and thus
# aren't accepted.
postgres_interval_re = re.compile(
r'^'
r'(?:(?P<days>-?\d+) (days? ?))?'
r'(?:(?P<sign>[-+])?'
r'(?P<hours>\d+):'
r'(?P<minutes>\d\d):'
r'(?P<seconds>\d\d)'
r'(?:\.(?P<microseconds>\d{1,6}))?'
r')?$'
)
def parse_date(value):
"""Parse a string and return a datetime.date.
Raise ValueError if the input is well formatted but not a valid date.
Return None if the input isn't well formatted.
"""
match = date_re.match(value)
if match:
kw = {k: int(v) for k, v in match.groupdict().items()}
return datetime.date(**kw)
def parse_time(value):
"""Parse a string and return a datetime.time.
This function doesn't support time zone offsets.
Raise ValueError if the input is well formatted but not a valid time.
Return None if the input isn't well formatted, in particular if it
contains an offset.
"""
match = time_re.match(value)
if match:
kw = match.groupdict()
kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0')
kw = {k: int(v) for k, v in kw.items() if v is not None}
return datetime.time(**kw)
def parse_datetime(value):
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Return None if the input isn't well formatted.
"""
match = datetime_re.match(value)
if match:
kw = match.groupdict()
kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0')
tzinfo = kw.pop('tzinfo')
if tzinfo == 'Z':
tzinfo = utc
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset = 60 * int(tzinfo[1:3]) + offset_mins
if tzinfo[0] == '-':
offset = -offset
tzinfo = get_fixed_timezone(offset)
kw = {k: int(v) for k, v in kw.items() if v is not None}
kw['tzinfo'] = tzinfo
return datetime.datetime(**kw)
def parse_duration(value):
"""Parse a duration string and return a datetime.timedelta.
The preferred format for durations in Django is '%d %H:%M:%S.%f'.
Also supports ISO 8601 representation and PostgreSQL's day-time interval
format.
"""
match = (
standard_duration_re.match(value) or
iso8601_duration_re.match(value) or
postgres_interval_re.match(value)
)
if match:
kw = match.groupdict()
days = datetime.timedelta(float(kw.pop('days', 0) or 0))
sign = -1 if kw.pop('sign', '+') == '-' else 1
if kw.get('microseconds'):
kw['microseconds'] = kw['microseconds'].ljust(6, '0')
if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
kw['microseconds'] = '-' + kw['microseconds']
kw = {k: float(v) for k, v in kw.items() if v is not None}
return days + sign * datetime.timedelta(**kw)
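# Illustrative sketch (not part of the original module):
# >>> parse_date('2019-04-01')
# datetime.date(2019, 4, 1)
# >>> parse_duration('3 04:05:06').total_seconds()
# 273906.0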
|
07714e29a14dca7a747c2d7096222d3d22b3340052c9cfb4c7cf7b17b904563e | """
Utilities for XML generation/parsing.
"""
import re
from collections import OrderedDict
from xml.sax.saxutils import XMLGenerator
class UnserializableContentError(ValueError):
pass
class SimplerXMLGenerator(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None:
attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
def characters(self, content):
if content and re.search(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]', content):
# Fail loudly when content has control chars (unsupported in XML 1.0)
# See http://www.w3.org/International/questions/qa-controls
raise UnserializableContentError("Control characters are not supported in XML 1.0")
XMLGenerator.characters(self, content)
def startElement(self, name, attrs):
# Sort attrs for a deterministic output.
sorted_attrs = OrderedDict(sorted(attrs.items())) if attrs else attrs
super().startElement(name, sorted_attrs)
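# Illustrative sketch (not part of the original module); output shown is
# approximate and assumes a text stream:
# >>> from io import StringIO
# >>> stream = StringIO()
# >>> generator = SimplerXMLGenerator(stream, 'utf-8')
# >>> generator.addQuickElement('item', 'text', {'id': '1'})
# >>> stream.getvalue()
# '<item id="1">text</item>'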
|
7537e6cd6a847f627366ad30c659c4443a2be9743ead6ede2bc33ed37da4385d | from decimal import Decimal
from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
force_grouping=False):
"""
Get a number (as a number or string), and return it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator.
For non-uniform digit grouping, it can be a sequence with the number
of digit group sizes following the format used by the Python locale
module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)).
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR
use_grouping = use_grouping or force_grouping
use_grouping = use_grouping and grouping != 0
# Make the common case fast
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(str(number))
# sign
sign = ''
if isinstance(number, Decimal):
str_number = '{:f}'.format(number)
else:
str_number = str(number)
if str_number[0] == '-':
sign = '-'
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos is not None:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos is not None:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
dec_part = dec_part and decimal_sep + dec_part
# grouping
if use_grouping:
try:
# if grouping is a sequence
intervals = list(grouping)
except TypeError:
# grouping is a single value
intervals = [grouping, 0]
active_interval = intervals.pop(0)
int_part_gd = ''
cnt = 0
for digit in int_part[::-1]:
if cnt and cnt == active_interval:
if intervals:
active_interval = intervals.pop(0) or active_interval
int_part_gd += thousand_sep[::-1]
cnt = 0
int_part_gd += digit
cnt += 1
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
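# Illustrative sketch (not part of the original module; requires configured
# Django settings because the grouping check reads USE_L10N):
# >>> format(1234.567, '.', decimal_pos=2, grouping=3, thousand_sep=',',
# ...        force_grouping=True)
# '1,234.56'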
|
ea23f4e10b4801cdd0c84116b2f2c0ac58bbe0d9aab48197d2f62d1db4c9c839 | import datetime
import functools
import os
import subprocess
import sys
from distutils.version import LooseVersion
# Private, stable API for detecting the Python version. PYXY means "Python X.Y
# or later". So that third-party apps can use these values, each constant
# should remain as long as the oldest supported Django version supports that
# Python version.
PY36 = sys.version_info >= (3, 6)
PY37 = sys.version_info >= (3, 7)
PY38 = sys.version_info >= (3, 8)
PY39 = sys.version_info >= (3, 9)
def get_version(version=None):
"""Return a PEP 440-compliant version number from VERSION."""
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
return main + sub
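# Illustrative sketch (not part of the original module):
# >>> get_version((2, 1, 4, 'final', 0))
# '2.1.4'
# >>> get_version((2, 0, 0, 'rc', 1))
# '2.0rc1'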
def get_main_version(version=None):
"""Return main version (X.Y[.Z]) from VERSION."""
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""
Return a tuple of the django version. If version argument is non-empty,
check for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@functools.lru_cache()
def get_git_changeset():
"""Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
def get_version_tuple(version):
"""
Return a tuple of version numbers (e.g. (1, 2, 3)) from the version
string (e.g. '1.2.3').
"""
loose_version = LooseVersion(version)
version_numbers = []
for item in loose_version.version:
if not isinstance(item, int):
break
version_numbers.append(item)
return tuple(version_numbers)
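# Illustrative sketch (not part of the original module): parsing stops at the
# first non-integer component.
# >>> get_version_tuple('2.0rc1')
# (2, 0)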
|
b51995cd776ca70d2129d6beea6daf0f45e054a1d98712e1dd1e802be12bba50 | """
Timezone-related classes and functions.
"""
import datetime
import functools
from contextlib import ContextDecorator
from datetime import timedelta, tzinfo
from threading import local
import pytz
from django.conf import settings
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class FixedOffset(tzinfo):
"""
Fixed offset in minutes east from UTC. Taken from Python's docs.
Kept as close as possible to the reference version. __init__ was changed
to make its arguments optional, according to Python's requirement that
tzinfo subclasses can be instantiated without arguments.
"""
def __init__(self, offset=None, name=None):
if offset is not None:
self.__offset = timedelta(minutes=offset)
if name is not None:
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
# UTC time zone as a tzinfo instance.
# (Use utc = datetime.timezone.utc here when PY35 isn't supported.)
utc = datetime.timezone(ZERO, 'UTC')
def get_fixed_timezone(offset):
"""Return a tzinfo instance with a fixed offset from UTC."""
if isinstance(offset, timedelta):
offset = offset.total_seconds() // 60
sign = '-' if offset < 0 else '+'
hhmm = '%02d%02d' % divmod(abs(offset), 60)
name = sign + hhmm
return FixedOffset(offset, name)
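# Illustrative sketch (not part of the original module): the offset is given
# in minutes east of UTC (or as a timedelta).
# >>> get_fixed_timezone(-210).tzname(None)
# '-0330'
# >>> get_fixed_timezone(330).tzname(None)
# '+0530'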
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@functools.lru_cache()
def get_default_timezone():
"""
Return the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
"""
return pytz.timezone(settings.TIME_ZONE)
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""Return the name of the default time zone."""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""Return the currently active time zone as a tzinfo instance."""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""Return the name of the currently active time zone."""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""Return the name of ``timezone``."""
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Set the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, str):
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unset the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(ContextDecorator):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses django.utils.timezone.activate()
to set the timezone on entry and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is ``None``, Django enables the default
time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
def __enter__(self):
self.old_timezone = getattr(_active, 'value', None)
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
"""
    Check if value is a datetime and convert it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (
isinstance(value, datetime.datetime) and
(settings.USE_TZ if use_tz is None else use_tz) and
not is_naive(value) and
getattr(value, 'convert_to_local_time', True)
)
return localtime(value) if should_convert else value
# Utilities
def localtime(value=None, timezone=None):
"""
Convert an aware datetime.datetime to local time.
Only aware datetimes are allowed. When value is omitted, it defaults to
now().
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if value is None:
value = now()
if timezone is None:
timezone = get_current_timezone()
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("localtime() cannot be applied to a naive datetime")
return value.astimezone(timezone)
def localdate(value=None, timezone=None):
"""
Convert an aware datetime to local time and return the value's date.
Only aware datetimes are allowed. When value is omitted, it defaults to
now().
Local time is defined by the current time zone, unless another time zone is
specified.
"""
return localtime(value, timezone).date()
def now():
"""
Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
return datetime.datetime.now(utc if settings.USE_TZ else None)
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determine if a given datetime.datetime is aware.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is not None
def is_naive(value):
"""
Determine if a given datetime.datetime is naive.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is None
def make_aware(value, timezone=None, is_dst=None):
"""Make a naive datetime.datetime in a given time zone aware."""
if timezone is None:
timezone = get_current_timezone()
if hasattr(timezone, 'localize'):
# This method is available for pytz time zones.
return timezone.localize(value, is_dst=is_dst)
else:
# Check that we won't overwrite the timezone of an aware datetime.
if is_aware(value):
raise ValueError(
"make_aware expects a naive datetime, got %s" % value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
"""Make an aware datetime.datetime naive in a given time zone."""
if timezone is None:
timezone = get_current_timezone()
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("make_naive() cannot be applied to a naive datetime")
return value.astimezone(timezone).replace(tzinfo=None)
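# Illustrative sketch (not part of the original module); passing an explicit
# pytz zone avoids touching settings:
# >>> import datetime, pytz
# >>> paris = pytz.timezone('Europe/Paris')
# >>> naive = datetime.datetime(2018, 7, 1, 12, 0)
# >>> aware = make_aware(naive, paris)
# >>> make_naive(aware, paris) == naive
# True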
|
c6c574bcd11efe726c702b2cc90c424f1a34fb7c5086af057a32213667f1af58 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
class Node:
"""
A single internal node in the tree graph. A Node should be viewed as a
connection (the root) with the children being either leaf nodes or other
Node instances.
"""
# Standard connector type. Clients usually won't use this at all and
# subclasses will usually override the value.
default = 'DEFAULT'
def __init__(self, children=None, connector=None, negated=False):
"""Construct a new Node. If no connector is given, use the default."""
self.children = children[:] if children else []
self.connector = connector or self.default
self.negated = negated
    # Required because django.db.models.query_utils.Q's __init__() is
    # problematic, but Q is a natural Node subclass in all other respects.
@classmethod
def _new_instance(cls, children=None, connector=None, negated=False):
"""
Create a new instance of this class when new Nodes (or subclasses) are
needed in the internal code in this class. Normally, it just shadows
__init__(). However, subclasses with an __init__ signature that aren't
an extension of Node.__init__ might need to implement this method to
allow a Node to create a new instance of them (if they have any extra
setting up to do).
"""
obj = Node(children, connector, negated)
obj.__class__ = cls
return obj
def __str__(self):
template = '(NOT (%s: %s))' if self.negated else '(%s: %s)'
return template % (self.connector, ', '.join(str(c) for c in self.children))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def __deepcopy__(self, memodict):
obj = Node(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = copy.deepcopy(self.children, memodict)
return obj
def __len__(self):
"""Return the the number of children this node has."""
return len(self.children)
def __bool__(self):
"""Return whether or not this node has children."""
return bool(self.children)
def __contains__(self, other):
"""Return True if 'other' is a direct child of this instance."""
return other in self.children
def __eq__(self, other):
return (
self.__class__ == other.__class__ and
(self.connector, self.negated) == (other.connector, other.negated) and
self.children == other.children
)
def __hash__(self):
return hash((self.__class__, self.connector, self.negated) + tuple(self.children))
def add(self, data, conn_type, squash=True):
"""
Combine this tree and the data represented by data using the
connector conn_type. The combine is done by squashing the node other
away if possible.
This tree (self) will never be pushed to a child node of the
combined tree, nor will the connector or negated properties change.
Return a node which can be used in place of data regardless if the
node other got squashed or not.
If `squash` is False the data is prepared and added as a child to
this tree without further logic.
"""
if data in self.children:
return data
if not squash:
self.children.append(data)
return data
if self.connector == conn_type:
# We can reuse self.children to append or squash the node other.
if (isinstance(data, Node) and not data.negated and
(data.connector == conn_type or len(data) == 1)):
# We can squash the other node's children directly into this
# node. We are just doing (AB)(CD) == (ABCD) here, with the
# addition that if the length of the other node is 1 the
# connector doesn't matter. However, for the len(self) == 1
# case we don't want to do the squashing, as it would alter
# self.connector.
self.children.extend(data.children)
return self
else:
                # We could perhaps use additional logic here to see if some
                # children could be used for pushdown.
self.children.append(data)
return data
else:
obj = self._new_instance(self.children, self.connector,
self.negated)
self.connector = conn_type
self.children = [obj, data]
return data
def negate(self):
"""Negate the sense of the root connector."""
self.negated = not self.negated
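# Illustrative sketch (not part of the original module): adding with the
# node's own connector simply appends, while a different connector pushes the
# existing children down one level.
# >>> n = Node(['a'])
# >>> n.add('b', 'DEFAULT')
# 'b'
# >>> str(n)
# '(DEFAULT: a, b)'
# >>> len(n)
# 2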
|
593e48045502659110457044de81054bd3b09be8c53ee7e31b398dce9c57921e | import inspect
import warnings
class RemovedInDjango30Warning(DeprecationWarning):
pass
class RemovedInDjango31Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInDjango30Warning
class warn_about_renamed_method:
def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
self.class_name = class_name
self.old_method_name = old_method_name
self.new_method_name = new_method_name
self.deprecation_warning = deprecation_warning
def __call__(self, f):
def wrapped(*args, **kwargs):
warnings.warn(
"`%s.%s` is deprecated, use `%s` instead." %
(self.class_name, self.old_method_name, self.new_method_name),
self.deprecation_warning, 2)
return f(*args, **kwargs)
return wrapped
class RenameMethodsBase(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`." %
(class_name, old_method_name, new_method_name),
deprecation_warning, 2)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
class DeprecationInstanceCheck(type):
def __instancecheck__(self, instance):
warnings.warn(
"`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative),
self.deprecation_warning, 2
)
return super().__instancecheck__(instance)
class MiddlewareMixin:
def __init__(self, get_response=None):
self.get_response = get_response
super().__init__()
def __call__(self, request):
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
response = response or self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
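# Minimal sketch (hypothetical middleware, not part of this module): mixing
# MiddlewareMixin into an old-style middleware written with process_request()
# and process_response() hooks makes it callable under the new-style protocol.
#
# class LegacyHeaderMiddleware(MiddlewareMixin):
#     def process_response(self, request, response):
#         response['X-Legacy'] = 'yes'
#         return response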
|
e424239ebdf859618dd9aa1ebe7dc2a10cbc39999a201b562138b8fdec9b8d4d | import codecs
import datetime
import locale
from decimal import Decimal
from urllib.parse import quote
from django.utils import six
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return '%s. You passed in %r (%s)' % (super().__str__(), self.obj, type(self.obj))
# For backwards compatibility. (originally in Django, then added to six 1.9)
python_2_unicode_compatible = six.python_2_unicode_compatible
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Return a string representing 's'. Treat bytestrings using the 'encoding'
codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
_PROTECTED_TYPES = (
type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Return a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
if isinstance(s, Promise) or not isinstance(s, str):
return str(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
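# Illustrative sketch (not part of the original module):
# >>> force_bytes('café')
# b'caf\xc3\xa9'
# >>> force_text(b'caf\xc3\xa9')
# 'café'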
smart_str = smart_text
force_str = force_text
smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987, slightly simplified
since the input is assumed to be a string rather than an arbitrary byte
stream.
Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.parse.quote() already considers all
# but the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
elif isinstance(iri, Promise):
iri = str(iri)
return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
(fmt % char).encode(): bytes((char,))
for ascii_range in _ascii_ranges
for char in ascii_range
for fmt in ['%02x', '%02X']
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# unicode characters.
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte.update({
(a + b).encode(): bytes.fromhex(a + b)
for a in _hexdig[8:] for b in _hexdig
})
def uri_to_iri(uri):
"""
Convert a Uniform Resource Identifier(URI) into an Internationalized
Resource Identifier(IRI).
This is the algorithm from section 3.2 of RFC 3987, excluding step 4.
Take an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
a string containing the encoded result (e.g. '/I%20♥%20Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
    # Fast selective unquote: First, split on '%' and then starting with the
# second block, decode the first 2 bytes if they represent a hex code to
# decode. The rest of the block is the part after '%AB', not containing
# any '%'. Add that to the output without further processing.
bits = uri.split(b'%')
if len(bits) == 1:
iri = uri
else:
parts = [bits[0]]
append = parts.append
hextobyte = _hextobyte
for item in bits[1:]:
hex = item[:2]
if hex in hextobyte:
append(hextobyte[item[:2]])
append(item[2:])
else:
append(b'%')
append(item)
iri = b''.join(parts)
return repercent_broken_unicode(iri).decode()
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in
# sections 2.2 and 2.3 of RFC 2396:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to section 3.3 of RFC 2396.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(path, safe="/:@&+$,-_.!~*'()")
def repercent_broken_unicode(path):
"""
As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
repercent-encode any octet produced that is not part of a strictly legal
UTF-8 octet sequence.
"""
try:
path.decode()
except UnicodeDecodeError as e:
repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = repercent_broken_unicode(
path[:e.start] + force_bytes(repercent) + path[e.end:])
return path
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
Encode certain chars that would normally be recognized as special chars
for URIs. Do not encode the ' character, as it is a valid character
within URIs. See the encodeURIComponent() JavaScript function for details.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(path.replace("\\", "/"), safe="/~!*()'")
def get_system_encoding():
"""
The encoding of the default system locale. Fallback to 'ascii' if the
    encoding is unsupported by Python or could not be determined. See tickets
#10335 and #5846.
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
|
208aa05e3b8f9d72ee7c263e9d7eb5a529c0bad56baff7105e74aaea3adcb4dc | """
Django's standard crypto functions and utilities.
"""
import hashlib
import hmac
import random
import time
from django.conf import settings
from django.utils.encoding import force_bytes
# Use the system PRNG if possible
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def salted_hmac(key_salt, value, secret=None):
"""
Return the HMAC-SHA1 of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY).
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function and
# SHA1 works nicely.
key = hashlib.sha1(key_salt + secret).digest()
# If len(key_salt + secret) > sha_constructor().block_size, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1)
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
('%s%s%s' % (random.getstate(), time.time(), settings.SECRET_KEY)).encode()
).digest()
)
return ''.join(random.choice(allowed_chars) for i in range(length))
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(force_bytes(val1), force_bytes(val2))
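# Illustrative sketch (not part of the original module):
# >>> constant_time_compare('abc123', 'abc123')
# True
# >>> constant_time_compare('abc123', 'abc124')
# False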
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""Return the hash of password using pbkdf2."""
if digest is None:
digest = hashlib.sha256
dklen = dklen or None
password = force_bytes(password)
salt = force_bytes(salt)
return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
|
e1a5ad4e983e5b108117755d8c4d33d50f66fb4bc48d4b388417f4c10dac78d9 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLs, however.
"""
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""Represent multiple possibilities at this point in a pattern string."""
class Group(list):
"""Represent a capturing group in the pattern string."""
class NonCapture(list):
"""Represent a non-capturing group in the pattern string."""
def normalize(pattern):
r"""
Given a reg-exp pattern, normalize it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore look-ahead and look-behind assertions.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
# FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in '!=<':
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extract the name and
# then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
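# Illustrative example (a sketch): a simple named-group pattern reverses to a
# single template string plus its parameter list.
#
#   >>> normalize(r'^articles/(?P<year>[0-9]{4})/$')
#   [('articles/%(year)s/', ['year'])]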
def next_char(input_iter):
r"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yield the next character, along with a boolean indicating whether it came
from an escape sequence.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. Walk to the close of
this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Return the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
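# Illustrative example (a sketch): the iterator must yield (char, escaped)
# pairs, so a plain string is wrapped with next_char(). For '{2,5}x' the
# minimum is 2 and the look-ahead character 'x' is handed back to the caller.
#
#   >>> get_quantifier('{', next_char(iter('2,5}x')))
#   (2, 'x')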
def contains(source, inst):
"""
Return True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turn the given source sequence into a list of reg-exp possibilities and
their arguments. Return a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, str):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
|
e20facbcb526b2f9f6c254beacf1096c5aff383aa86abe6ac702231b1acb9710 | from functools import lru_cache # noqa
# Deprecate or remove this module when no supported version of Django still
# supports Python 2. Until then, keep it to allow pluggable apps to support
# Python 2 and Python 3 without raising a deprecation warning.
|
a26c42c813521a7df0abe2b53f1d796731001f6378e6b7c4a83706a737172c2d | import calendar
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import gettext, ngettext_lazy
TIME_STRINGS = {
'year': ngettext_lazy('%d year', '%d years'),
'month': ngettext_lazy('%d month', '%d months'),
'week': ngettext_lazy('%d week', '%d weeks'),
'day': ngettext_lazy('%d day', '%d days'),
'hour': ngettext_lazy('%d hour', '%d hours'),
'minute': ngettext_lazy('%d minute', '%d minutes'),
}
TIMESINCE_CHUNKS = (
(60 * 60 * 24 * 365, 'year'),
(60 * 60 * 24 * 30, 'month'),
(60 * 60 * 24 * 7, 'week'),
(60 * 60 * 24, 'day'),
(60 * 60, 'hour'),
(60, 'minute'),
)
def timesince(d, now=None, reversed=False, time_strings=None):
"""
Take two datetime objects and return the time between d and now as a nicely
formatted string, e.g. "10 minutes". If d occurs after now, return
"0 minutes".
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
`time_strings` is an optional dict of strings to replace the default
TIME_STRINGS dict.
Adapted from
http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
if time_strings is None:
time_strings = TIME_STRINGS
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
now = now or datetime.datetime.now(utc if is_aware(d) else None)
if reversed:
d, now = now, d
delta = now - d
# Deal with leap years by subtracting the number of leap days
leapdays = calendar.leapdays(d.year, now.year)
if leapdays != 0:
if calendar.isleap(d.year):
leapdays -= 1
elif calendar.isleap(now.year):
leapdays += 1
delta -= datetime.timedelta(leapdays)
# ignore microseconds
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return avoid_wrapping(gettext('0 minutes'))
for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
count = since // seconds
if count != 0:
break
result = avoid_wrapping(time_strings[name] % count)
if i + 1 < len(TIMESINCE_CHUNKS):
# Now get the second item
seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
result += gettext(', ') + avoid_wrapping(time_strings[name2] % count2)
return result
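# Illustrative example (a sketch; requires configured Django settings for the
# lazily translated unit names, and the result contains non-breaking spaces,
# shown here as plain spaces for readability):
#
#   >>> import datetime
#   >>> timesince(datetime.datetime(2020, 1, 1),
#   ...           now=datetime.datetime(2020, 1, 18, 12, 0))
#   '2 weeks, 3 days'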
def timeuntil(d, now=None, time_strings=None):
"""
Like timesince, but return a string measuring the time until the given time.
"""
return timesince(d, now, reversed=True, time_strings=time_strings)
|
41642f9adb5ea8ac399be10c8f864ad99c12c7e0ec954564f75cf43648a5ca5c | import inspect
def get_func_args(func):
sig = inspect.signature(func)
return [
arg_name for arg_name, param in sig.parameters.items()
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
def get_func_full_args(func):
"""
Return a list of (argument name, default value) tuples. If the argument
does not have a default value, omit it in the tuple. Arguments such as
*args and **kwargs are also included.
"""
sig = inspect.signature(func)
args = []
for arg_name, param in sig.parameters.items():
name = arg_name
# Ignore 'self'
if name == 'self':
continue
if param.kind == inspect.Parameter.VAR_POSITIONAL:
name = '*' + name
elif param.kind == inspect.Parameter.VAR_KEYWORD:
name = '**' + name
if param.default != inspect.Parameter.empty:
args.append((name, param.default))
else:
args.append((name,))
return args
def func_accepts_kwargs(func):
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_KEYWORD
)
def func_accepts_var_args(func):
"""
Return True if function 'func' accepts positional arguments *args.
"""
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_POSITIONAL
)
def func_has_no_args(func):
args = [
p for p in inspect.signature(func).parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD
]
return len(args) == 1
def func_supports_parameter(func, parameter):
return parameter in inspect.signature(func).parameters
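# Illustrative example (a sketch) of the introspection helpers above, using a
# hypothetical function signature:
#
#   >>> def demo(a, b=1, *args, **kwargs): pass
#   >>> get_func_args(demo)
#   ['a', 'b']
#   >>> get_func_full_args(demo)
#   [('a',), ('b', 1), ('*args',), ('**kwargs',)]
#   >>> func_accepts_kwargs(demo), func_accepts_var_args(demo)
#   (True, True)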
|
ce20ba72e3ea72fb04edd7da74984a8fb197da25709793b243e10d22dded80a0 | import copy
from collections import OrderedDict
class OrderedSet:
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict.fromkeys(iterable or ())
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict)
def __contains__(self, item):
return item in self.dict
def __bool__(self):
return bool(self.dict)
def __len__(self):
return len(self.dict)
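# Illustrative example (a sketch): insertion order is preserved, and re-adding
# an existing item does not move it.
#
#   >>> s = OrderedSet([3, 1, 2])
#   >>> s.add(1)
#   >>> list(s)
#   [3, 1, 2]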
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super().__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, super().__repr__())
def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
try:
list_ = super().__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(key)
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super().__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo):
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
return {**self.__dict__, '_data': {k: self._getlist(k) for k in self}}
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _getlist(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate the values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def getlist(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._getlist(key, default, force_list=True)
def setlist(self, key, list_):
super().__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self._getlist(key)
def appendlist(self, key, value):
"""Append an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def lists(self):
"""Yield (key, list) pairs."""
return iter(super().items())
def values(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
def copy(self):
"""Return a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""Extend rather than replace existing key lists."""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.items():
self.setlistdefault(key).append(value)
def dict(self):
"""Return current object as a dict with singular values."""
return {key: self[key] for key in self}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *args, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wrap accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super().__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix):]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
|
6325177b362a1bf47e84ca92878439d2ac937ef45e0ffe134c7543d9cd731f85 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
import calendar
import datetime
import re
import time
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import gettext as _
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter:
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(str(formatstr))):
if i % 2:
if type(self.data) is datetime.date and hasattr(TimeFormat, piece):
raise TypeError(
"The format for date objects may not contain "
"time-related format specifiers (found '%s')." % piece
)
pieces.append(str(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
# We only support timezone when formatting datetime objects,
# not date objects (timezone information not appropriate),
# or time objects (against established django policy).
if isinstance(obj, datetime.datetime):
if is_naive(obj):
self.timezone = get_default_timezone()
else:
self.timezone = obj.tzinfo
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError('may be implemented in a future release')
def e(self):
"""
Timezone name.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
return self.data.tzname() or ''
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return '%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def O(self): # NOQA: E743
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
seconds = self.Z()
if seconds == "":
return ""
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
name = None
try:
name = self.timezone.tzname(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
pass
if name is None:
name = self.format('O')
return str(name)
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return '%06d' % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
try:
offset = self.timezone.utcoffset(self.data)
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ""
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self): # NOQA: E743
"'1' if Daylight Savings Time, '0' otherwise."
try:
if self.timezone and self.timezone.dst(self.data):
return '1'
else:
return '0'
except Exception:
# pytz raises AmbiguousTimeError during the autumn DST change.
# This happens mainly when __init__ receives a naive datetime
# and sets self.timezone = get_default_timezone().
return ''
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self): # NOQA: E743
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return 'th'
last = self.data.day % 10
if last == 1:
return 'st'
if last == 2:
return 'nd'
if last == 3:
return 'rd'
return 'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if isinstance(self.data, datetime.datetime) and is_aware(self.data):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return str(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
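# Illustrative example of the convenience functions (a sketch; month names are
# lazily translated and naive datetimes consult the default time zone, so a
# configured Django settings module is assumed):
#
#   >>> import datetime
#   >>> format(datetime.datetime(2005, 12, 25, 10, 30), 'jS F Y H:i')
#   '25th December 2005 10:30'
#   >>> time_format(datetime.time(14, 5), 'H:i')
#   '14:05'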
|
ce64e514b01dbf6d018411e83c20c1053d8decf014ae461cb405031c5d6450fb | import copy
import os
from importlib import import_module
from importlib.util import find_spec as importlib_find
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err:
raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
) from err
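# Illustrative example (a sketch): any importable dotted path works, e.g. a
# standard-library attribute.
#
#   >>> import_string('math.sqrt')(9)
#   3.0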
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
import_module('%s.%s' % (app_config.name, module_to_search))
except Exception:
# Reset the registry to the state before the last import
# as this import will have to reoccur on the next request and
# this could raise NotRegistered and AlreadyRegistered
# exceptions (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
try:
return importlib_find(full_module_name, package_path) is not None
except (ImportError, AttributeError):
# When module_name is an invalid dotted path, Python raises ImportError
# (or ModuleNotFoundError in Python 3.6+). AttributeError may be raised
# if the penultimate part of the path is not a package.
# (http://bugs.python.org/issue30436)
return False
def module_dir(module):
"""
Find the name of the directory that contains a module, if possible.
Raise ValueError otherwise, e.g. for namespace packages that are split
over several directories.
"""
# Convert to list because _NamespacePath does not support indexing on 3.3.
paths = list(getattr(module, '__path__', []))
if len(paths) == 1:
return paths[0]
else:
filename = getattr(module, '__file__', None)
if filename is not None:
return os.path.dirname(filename)
raise ValueError("Cannot determine directory containing %s" % module)
|
79ca1cafb18acad0a6a7d15bc8e2c3511e3fd2f9e070e8cc248b714c69d3455f | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X:
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr:
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super().__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super().__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super().__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter:
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return True if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator:
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
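# Illustrative example (a sketch): attaching a metaclass in a way that works on
# both Python 2 and Python 3. Meta and MyClass are hypothetical names.
#
#   >>> class Meta(type): pass
#   >>> @add_metaclass(Meta)
#   ... class MyClass: pass
#   >>> type(MyClass) is Meta
#   True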
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
if PY3:
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview)
|
c5845e22a705d85143f3906a0eb978f31a385228c823d0fa6320be147b143dbc | # Copyright (c) 2010 Guilherme Gondim. All rights reserved.
# Copyright (c) 2009 Simon Willison. All rights reserved.
# Copyright (c) 2002 Drew Perttula. All rights reserved.
#
# License:
# Python Software Foundation License version 2
#
# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
# ALL WARRANTIES.
#
# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
# code so it may be used in proprietary projects just like prior ``baseconv``
# distributions.
#
# All trademarks referenced herein are property of their respective holders.
#
"""
Convert numbers from base 10 integers to base X strings and back again.
Sample usage::
>>> base20 = BaseConverter('0123456789abcdefghij')
>>> base20.encode(1234)
'31e'
>>> base20.decode('31e')
1234
>>> base20.encode(-1234)
'-31e'
>>> base20.decode('-31e')
-1234
>>> base11 = BaseConverter('0123456789-', sign='$')
>>> base11.encode('$1234')
'$-22'
>>> base11.decode('$-22')
'$1234'
"""
BASE2_ALPHABET = '01'
BASE16_ALPHABET = '0123456789ABCDEF'
BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
BASE64_ALPHABET = BASE62_ALPHABET + '-_'
class BaseConverter:
decimal_digits = '0123456789'
def __init__(self, digits, sign='-'):
self.sign = sign
self.digits = digits
if sign in self.digits:
raise ValueError('Sign character found in converter base digits.')
def __repr__(self):
return "<%s: base%s (%s)>" % (self.__class__.__name__, len(self.digits), self.digits)
def encode(self, i):
neg, value = self.convert(i, self.decimal_digits, self.digits, '-')
if neg:
return self.sign + value
return value
def decode(self, s):
neg, value = self.convert(s, self.digits, self.decimal_digits, self.sign)
if neg:
value = '-' + value
return int(value)
def convert(self, number, from_digits, to_digits, sign):
if str(number)[0] == sign:
number = str(number)[1:]
neg = 1
else:
neg = 0
# make an integer out of the number
x = 0
for digit in str(number):
x = x * len(from_digits) + from_digits.index(digit)
# create the result in base 'len(to_digits)'
if x == 0:
res = to_digits[0]
else:
res = ''
while x > 0:
digit = x % len(to_digits)
res = to_digits[digit] + res
x = int(x // len(to_digits))
return neg, res
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
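# Quick round-trip examples for the prebuilt converters (editor's addition,
# in the same doctest style as the module docstring above):
#
#     >>> base36.encode(1234)
#     'ya'
#     >>> base36.decode('ya')
#     1234
#     >>> base62.encode(100)
#     '1c'
#     >>> base62.decode('1c')
#     100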
|
56d0d40102883c5bafe979301ba8088cb418b5cb3cbe018c712e0e429485c7e1 | import datetime
def _get_duration_components(duration):
days = duration.days
seconds = duration.seconds
microseconds = duration.microseconds
minutes = seconds // 60
seconds = seconds % 60
hours = minutes // 60
minutes = minutes % 60
return days, hours, minutes, seconds, microseconds
def duration_string(duration):
"""Version of str(timedelta) which is not English specific."""
days, hours, minutes, seconds, microseconds = _get_duration_components(duration)
string = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
if days:
string = '{} '.format(days) + string
if microseconds:
string += '.{:06d}'.format(microseconds)
return string
def duration_iso_string(duration):
if duration < datetime.timedelta(0):
sign = '-'
duration *= -1
else:
sign = ''
days, hours, minutes, seconds, microseconds = _get_duration_components(duration)
ms = '.{:06d}'.format(microseconds) if microseconds else ""
return '{}P{}DT{:02d}H{:02d}M{:02d}{}S'.format(sign, days, hours, minutes, seconds, ms)
def duration_microseconds(delta):
return (24 * 60 * 60 * delta.days + delta.seconds) * 1000000 + delta.microseconds
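# Illustrative values (editor's addition): the helpers above should behave
# roughly as follows for a few simple timedeltas:
#
#     >>> import datetime
#     >>> duration_string(datetime.timedelta(days=1, hours=2, seconds=3))
#     '1 02:00:03'
#     >>> duration_iso_string(datetime.timedelta(hours=1, minutes=2))
#     'P0DT01H02M00S'
#     >>> duration_microseconds(datetime.timedelta(seconds=1))
#     1000000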
|
865ee996eacd1c2eed8fff4e95bec7efe2c2b4e84e57bd685a0fb1c793c1161e | "Commonly-used date structures"
from django.utils.translation import gettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
5: _('Saturday'), 6: _('Sunday')
}
WEEKDAYS_ABBR = {
0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
5: _('Sat'), 6: _('Sun')
}
MONTHS = {
1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
12: _('December')
}
MONTHS_3 = {
1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
|
f2cb41c660dd151158aa3fe4a2310e1de4b8423b25a03914d18eb80619f239fb | """HTML utilities suitable for global use."""
import json
import re
from html.parser import HTMLParser
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION_CHARS = '.,:;!'
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('<', '>'), ('"', '"'), (''', ''')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
_html_escapes = {
    ord('&'): '&',
    ord('<'): '<',
    ord('>'): '>',
    ord('"'): '"',
    ord("'"): ''',
}
@keep_lazy(str, SafeText)
def escape(text):
"""
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
Always escape input, even if it's already escaped and marked as such.
This may result in double-escaping. If this is a concern, use
conditional_escape() instead.
"""
return mark_safe(str(text).translate(_html_escapes))
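# Example (editor's addition): with the _html_escapes mapping above, escape()
# should translate markup characters into entities and return a SafeText:
#
#     >>> escape('<a href="x">Tom & Jerry</a>')
#     '<a href="x">Tom & Jerry</a>'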
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('`'): '\\u0060',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeText)
def escapejs(value):
"""Hex encode characters for use in JavaScript strings."""
return mark_safe(str(value).translate(_js_escapes))
_json_script_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
def json_script(value, element_id):
"""
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
"""
from django.core.serializers.json import DjangoJSONEncoder
json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes)
return format_html(
'<script id="{}" type="application/json">{}</script>',
element_id, mark_safe(json_str)
)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if isinstance(text, Promise):
text = str(text)
if hasattr(text, '__html__'):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()}
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
@keep_lazy_text
def linebreaks(value, autoescape=False):
"""Convert newlines into <p> and <br>s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', str(value))
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br>') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br>') for p in paras]
return '\n\n'.join(paras)
class MLStripper(HTMLParser):
def __init__(self):
super().__init__(convert_charrefs=False)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(value)
s.close()
return s.get_data()
@keep_lazy_text
def strip_tags(value):
"""Return the given HTML with all tags stripped."""
    # Note: in the typical case this loop executes _strip_once only once. The
    # loop condition is redundant, but it reduces the number of executions of
    # _strip_once.
value = str(value)
while '<' in value and '>' in value:
new_value = _strip_once(value)
if len(new_value) >= len(value):
# _strip_once was not able to detect more tags
break
value = new_value
return value
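# Example (editor's addition): strip_tags() should remove markup but keep the
# text content:
#
#     >>> strip_tags('<p>Hello <b>world</b>!</p>')
#     'Hello world!'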
@keep_lazy_text
def strip_spaces_between_tags(value):
"""Return the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', str(value))
def smart_urlquote(url):
"""Quote a URL if it isn't already quoted."""
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC3986 Unreserved Characters
# http://tools.ietf.org/html/rfc3986#section-2.3
# See also http://bugs.python.org/issue16285
return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Convert any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, truncate the URLs in the link text longer
than this limit to trim_url_limit-3 characters and append an ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
"""
safe_input = isinstance(text, SafeData)
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
def unescape(text, trail):
"""
If input URL is HTML-escaped, unescape it so that it can be safely fed
to smart_urlquote. For example:
        http://example.com?x=1&y=<2> => http://example.com?x=1&y=<2>
"""
        unescaped = (text + trail).replace(
            '&', '&').replace('<', '<').replace(
            '>', '>').replace('"', '"').replace(''', "'")
if trail and unescaped.endswith(trail):
# Remove trail for unescaped if it was not consumed by unescape
unescaped = unescaped[:-len(trail)]
elif trail == ';':
# Trail was consumed by unescape (as end-of-entity marker), move it to text
text += trail
trail = ''
return text, unescaped, trail
def trim_punctuation(lead, middle, trail):
"""
Trim trailing and wrapping punctuation from `middle`. Return the items
of the new state.
"""
# Continue trimming until middle remains unchanged.
trimmed_something = True
while trimmed_something:
trimmed_something = False
# Trim trailing punctuation.
stripped = middle.rstrip(TRAILING_PUNCTUATION_CHARS)
if middle != stripped:
trail = middle[len(stripped):] + trail
middle = stripped
trimmed_something = True
# Trim wrapping punctuation.
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead += opening
trimmed_something = True
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
trimmed_something = True
return lead, middle, trail
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if '@' not in value or value.startswith('@') or value.endswith('@'):
return False
try:
p1, p2 = value.split('@')
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if '.' not in p2 or p2.startswith('.'):
return False
return True
words = word_split_re.split(str(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# lead: Current punctuation trimmed from the beginning of the word.
# middle: Current state of the word.
# trail: Current punctuation trimmed from the end of the word.
lead, middle, trail = '', word, ''
# Deal with punctuation.
lead, middle, trail = trim_punctuation(lead, middle, trail)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote(middle_unescaped)
elif simple_url_2_re.match(middle):
middle, middle_unescaped, trail = unescape(middle, trail)
url = smart_urlquote('http://%s' % middle_unescaped)
elif ':' not in middle and is_email_simple(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
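# Example (editor's addition): a bare "www." link should be wrapped in an
# anchor, roughly as follows:
#
#     >>> urlize('Visit www.example.com today', nofollow=True)
#     'Visit <a href="http://www.example.com" rel="nofollow">www.example.com</a> today'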
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeText.
"""
if '__html__' in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if '__str__' not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
|
1201c435796c66f6ceac6782c2114cf3d7199897ba14cb201b70c4e597f79b55 | import logging
import logging.config # needed when logging_config doesn't start with logging.config
from copy import copy
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.core.management.color import color_style
from django.utils.module_loading import import_string
from django.views.debug import ExceptionReporter
request_logger = logging.getLogger('django.request')
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded (DEBUG=False) by means of the
# require_debug_true filter.
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[{server_time}] {message}',
'style': '{',
}
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
},
}
}
def configure_logging(logging_config, logging_settings):
if logging_config:
# First find the logging configuration function ...
logging_config_func = import_string(logging_config)
logging.config.dictConfig(DEFAULT_LOGGING)
# ... then invoke it with the logging settings
if logging_settings:
logging_config_func(logging_settings)
class AdminEmailHandler(logging.Handler):
"""An exception log handler that emails log entries to site admins.
If the request is passed as the first argument to the log record,
request data will be provided in the email report.
"""
def __init__(self, include_html=False, email_backend=None):
super().__init__()
self.include_html = include_html
self.email_backend = email_backend
def emit(self, record):
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
subject = self.format_subject(subject)
# Since we add a nicely formatted traceback on our own, create a copy
# of the log record without the exception data.
no_exc_record = copy(record)
no_exc_record.exc_info = None
no_exc_record.exc_text = None
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text())
html_message = reporter.get_traceback_html() if self.include_html else None
self.send_mail(subject, message, fail_silently=True, html_message=html_message)
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs)
def connection(self):
return get_connection(backend=self.email_backend, fail_silently=True)
def format_subject(self, subject):
"""
Escape CR and LF characters.
"""
return subject.replace('\n', '\\n').replace('\r', '\\r')
class CallbackFilter(logging.Filter):
"""
A logging filter that checks the return value of a given callable (which
takes the record-to-be-logged as its only parameter) to decide whether to
log a record.
"""
def __init__(self, callback):
self.callback = callback
def filter(self, record):
if self.callback(record):
return 1
return 0
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return settings.DEBUG
class ServerFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
self.style = color_style()
super().__init__(*args, **kwargs)
def format(self, record):
msg = record.msg
status_code = getattr(record, 'status_code', None)
if status_code:
if 200 <= status_code < 300:
# Put 2XX first, since it should be the common case
msg = self.style.HTTP_SUCCESS(msg)
elif 100 <= status_code < 200:
msg = self.style.HTTP_INFO(msg)
elif status_code == 304:
msg = self.style.HTTP_NOT_MODIFIED(msg)
elif 300 <= status_code < 400:
msg = self.style.HTTP_REDIRECT(msg)
elif status_code == 404:
msg = self.style.HTTP_NOT_FOUND(msg)
elif 400 <= status_code < 500:
msg = self.style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other status code
msg = self.style.HTTP_SERVER_ERROR(msg)
if self.uses_server_time() and not hasattr(record, 'server_time'):
record.server_time = self.formatTime(record, self.datefmt)
record.msg = msg
return super().format(record)
def uses_server_time(self):
return self._fmt.find('{server_time}') >= 0
def log_response(message, *args, response=None, request=None, logger=request_logger, level=None, exc_info=None):
"""
Log errors based on HttpResponse status.
Log 5xx responses as errors and 4xx responses as warnings (unless a level
is given as a keyword argument). The HttpResponse status_code and the
request are passed to the logger's extra parameter.
"""
# Check if the response has already been logged. Multiple requests to log
# the same response can be received in some cases, e.g., when the
# response is the result of an exception and is logged at the time the
# exception is caught so that the exc_info can be recorded.
if getattr(response, '_has_been_logged', False):
return
if level is None:
if response.status_code >= 500:
level = 'error'
elif response.status_code >= 400:
level = 'warning'
else:
level = 'info'
getattr(logger, level)(
message, *args,
extra={
'status_code': response.status_code,
'request': request,
},
exc_info=exc_info,
)
response._has_been_logged = True
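# Illustrative call (editor's addition; ``request`` and ``response`` are
# hypothetical objects from a view). A 404 response would be logged at
# 'warning' level, with status_code and request passed via ``extra``:
#
#     log_response(
#         'Not Found: %s', request.path,
#         response=response, request=request,
#     )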
|
cc8d054a843ea51b46cce3fcbe9c67f4178f1d7c93c4c361f617eb44d295e843 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import Promise, wraps
class SafeData:
def __html__(self):
"""
Return the html representation of a string for interoperability.
This allows other template engines to understand Django's SafeData.
"""
return self
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
Kept in Django 2.0 for usage by apps supporting Python 2. Shouldn't be used
in Django anymore.
"""
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
string is safe. Otherwise, the result is no longer safe.
"""
t = super().__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
class SafeText(str, SafeData):
"""
A str subclass that has been specifically marked as "safe" for HTML output
purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe string with another safe byte string or
safe string is safe. Otherwise, the result is no longer safe.
"""
t = super().__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def __str__(self):
return self
SafeString = SafeText
def _safety_decorator(safety_marker, func):
@wraps(func)
def wrapped(*args, **kwargs):
return safety_marker(func(*args, **kwargs))
return wrapped
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string is appropriate.
If used on a method as a decorator, mark the returned data as safe.
Can be called multiple times on a single string.
"""
if hasattr(s, '__html__'):
return s
if isinstance(s, (str, Promise)):
return SafeText(s)
if callable(s):
return _safety_decorator(mark_safe, s)
return SafeText(str(s))
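# Example (editor's addition): mark_safe() wraps plain strings in SafeText,
# so conditional_escape() and template rendering leave them untouched:
#
#     >>> s = mark_safe('<b>bold</b>')
#     >>> isinstance(s, SafeText)
#     True
#     >>> s.__html__() == s
#     True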
|
bdf9e6d7903abe0acbbe0ce129113da09bc785dd49bcab4329cd5189b11f0ae8 | """
Based on "python-archive" -- https://pypi.org/project/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <[email protected]> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import stat
import tarfile
import zipfile
class ArchiveException(Exception):
"""
Base exception class for all archive errors.
"""
class UnrecognizedArchiveFormat(ArchiveException):
"""
Error raised when passed file is not a recognized archive format.
"""
def extract(path, to_path=''):
"""
Unpack the tar or zip file at the specified path to the directory
specified by to_path.
"""
with Archive(path) as archive:
archive.extract(to_path)
class Archive:
"""
The external API class that encapsulates an archive implementation.
"""
def __init__(self, file):
self._archive = self._archive_cls(file)(file)
@staticmethod
def _archive_cls(file):
cls = None
if isinstance(file, str):
filename = file
else:
try:
filename = file.name
except AttributeError:
raise UnrecognizedArchiveFormat(
"File object not a recognized archive format.")
base, tail_ext = os.path.splitext(filename.lower())
cls = extension_map.get(tail_ext)
if not cls:
base, ext = os.path.splitext(base)
cls = extension_map.get(ext)
if not cls:
raise UnrecognizedArchiveFormat(
"Path not a recognized archive format: %s" % filename)
return cls
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def extract(self, to_path=''):
self._archive.extract(to_path)
def list(self):
self._archive.list()
def close(self):
self._archive.close()
class BaseArchive:
"""
Base Archive class. Implementations should inherit this class.
"""
@staticmethod
def _copy_permissions(mode, filename):
"""
If the file in the archive has some permissions (this assumes a file
won't be writable/executable without being readable), apply those
permissions to the unarchived file.
"""
if mode & stat.S_IROTH:
os.chmod(filename, mode)
def split_leading_dir(self, path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(self, paths):
"""
Return True if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive).
"""
common_prefix = None
for path in paths:
prefix, rest = self.split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def extract(self):
raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')
def list(self):
raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
class TarArchive(BaseArchive):
def __init__(self, file):
self._archive = tarfile.open(file)
def list(self, *args, **kwargs):
self._archive.list(*args, **kwargs)
def extract(self, to_path):
members = self._archive.getmembers()
leading = self.has_leading_dir(x.name for x in members)
for member in members:
name = member.name
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
if member.isdir():
if filename and not os.path.exists(filename):
os.makedirs(filename)
else:
try:
extracted = self._archive.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
print("In the tar file %s the member %s is invalid: %s" %
(name, member.name, exc))
else:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(extracted, outfile)
self._copy_permissions(member.mode, filename)
finally:
if extracted:
extracted.close()
def close(self):
self._archive.close()
class ZipArchive(BaseArchive):
def __init__(self, file):
self._archive = zipfile.ZipFile(file)
def list(self, *args, **kwargs):
self._archive.printdir(*args, **kwargs)
def extract(self, to_path):
namelist = self._archive.namelist()
leading = self.has_leading_dir(namelist)
for name in namelist:
data = self._archive.read(name)
info = self._archive.getinfo(name)
if leading:
name = self.split_leading_dir(name)[1]
filename = os.path.join(to_path, name)
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if filename.endswith(('/', '\\')):
# A directory
if not os.path.exists(filename):
os.makedirs(filename)
else:
with open(filename, 'wb') as outfile:
outfile.write(data)
# Convert ZipInfo.external_attr to mode
mode = info.external_attr >> 16
self._copy_permissions(mode, filename)
def close(self):
self._archive.close()
extension_map = {
'.tar': TarArchive,
'.tar.bz2': TarArchive,
'.tar.gz': TarArchive,
'.tgz': TarArchive,
'.tz2': TarArchive,
'.zip': ZipArchive,
}
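# Illustrative usage (editor's addition; the paths are hypothetical). The
# extension map above selects TarArchive for .tar/.tgz/.tar.* files and
# ZipArchive for .zip files; anything else raises UnrecognizedArchiveFormat:
#
#     extract('backup.tar.gz', to_path='/tmp/unpacked')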
|
3ff0522ec2130cfd995bd1093f2e9c8859de89bd22cfd7b3073d8b0fb0958911 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod '
'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim '
'veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea '
'commodo consequat. Duis aute irure dolor in reprehenderit in voluptate '
'velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint '
'occaecat cupidatat non proident, sunt in culpa qui officia deserunt '
'mollit anim id est laborum.'
)
WORDS = (
'exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti',
)
COMMON_WORDS = (
'lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua',
)
def sentence():
"""
Return a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = ', '.join(sections)
# Convert to sentence case and add end punctuation.
return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Return a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return ' '.join(sentence() for i in range(random.randint(1, 4)))
def paragraphs(count, common=True):
"""
Return a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Return a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
word_list = list(COMMON_WORDS) if common else []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return ' '.join(word_list)
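# Example (editor's addition): with common=True the first words come from the
# fixed 'lorem ipsum' opening, so short requests are deterministic:
#
#     >>> words(7)
#     'lorem ipsum dolor sit amet consectetur adipisicing'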
|
8044614cdaa00bb42b078689098a52663446c1173ebea64e51a3c0f8b8adc350 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
import datetime
import email
from io import StringIO
from urllib.parse import urlparse
from django.utils import datetime_safe
from django.utils.encoding import iri_to_uri
from django.utils.timezone import utc
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
if not isinstance(date, datetime.datetime):
date = datetime.datetime.combine(date, datetime.time())
return email.utils.format_datetime(date)
def rfc3339_date(date):
if not isinstance(date, datetime.datetime):
date = datetime.datetime.combine(date, datetime.time())
return date.isoformat() + ('Z' if date.utcoffset() is None else '')
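# Examples (editor's addition): naive datetimes should be rendered without a
# real offset ('-0000' for RFC 2822, a trailing 'Z' for RFC 3339):
#
#     >>> import datetime
#     >>> rfc2822_date(datetime.datetime(2020, 1, 2, 3, 4, 5))
#     'Thu, 02 Jan 2020 03:04:05 -0000'
#     >>> rfc3339_date(datetime.datetime(2020, 1, 2, 3, 4, 5))
#     '2020-01-02T03:04:05Z'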
def get_tag_uri(url, date):
"""
Create a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
class SyndicationFeed:
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
def to_str(s):
return str(s) if s is not None else s
categories = categories and [str(c) for c in categories]
self.feed = {
'title': to_str(title),
'link': iri_to_uri(link),
'description': to_str(description),
'language': to_str(language),
'author_email': to_str(author_email),
'author_name': to_str(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_str(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_str(feed_copyright),
'id': feed_guid or link,
'ttl': to_str(ttl),
**kwargs,
}
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, categories=(),
item_copyright=None, ttl=None, updateddate=None, enclosures=None, **kwargs):
"""
Add an item to the feed. All args are expected to be strings except
pubdate and updateddate, which are datetime.datetime objects, and
enclosures, which is an iterable of instances of the Enclosure class.
"""
def to_str(s):
return str(s) if s is not None else s
categories = categories and [to_str(c) for c in categories]
self.items.append({
'title': to_str(title),
'link': iri_to_uri(link),
'description': to_str(description),
'author_email': to_str(author_email),
'author_name': to_str(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_str(comments),
'unique_id': to_str(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosures': enclosures or (),
'categories': categories or (),
'item_copyright': to_str(item_copyright),
'ttl': to_str(ttl),
**kwargs,
})
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Output the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Return the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
        Return the latest item's pubdate or updateddate. If no items
        have either of these attributes, return the current UTC date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now(utc)
class Enclosure:
"""An RSS enclosure"""
def __init__(self, url, length, mime_type):
"All args are expected to be strings"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
content_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(
"dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"}
)
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosures']:
enclosures = list(item['enclosures'])
if len(enclosures) > 1:
raise ValueError(
"RSS feed items may only have one enclosure, see "
"http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
)
enclosure = enclosures[0]
handler.addQuickElement('enclosure', '', {
'url': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: https://tools.ietf.org/html/rfc4287
content_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosures.
for enclosure in item['enclosures']:
handler.addQuickElement('link', '', {
'rel': 'enclosure',
'href': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
|
85c3bfeea6acb122397537d0e423edb40f2cddff72685f149ea28a99addb23a3 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, path=None):
"""
Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
The `path` kwarg specifies the import path.
"""
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Return a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args)
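# Illustrative usage (editor's addition; ``MyValidator`` and the module path
# in the expected result are hypothetical):
#
#     @deconstructible
#     class MyValidator:
#         def __init__(self, limit):
#             self.limit = limit
#
#     MyValidator(10).deconstruct()
#     # -> ('path.to.module.MyValidator', (10,), {})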
|
c52436fa4c3568a79d97a6d442ff7826c282204f0355633c5368e33bc5c71e9c | import copy
import itertools
import operator
from functools import total_ordering, wraps
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*args, *moreargs, **{**kwargs, **morekwargs})
return _curried
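# Example (editor's addition): curry() pre-binds leading arguments:
#
#     >>> from operator import mul
#     >>> double = curry(mul, 2)
#     >>> double(21)
#     42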
class cached_property:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = name or func.__name__
def __get__(self, instance, cls=None):
"""
Call the function and put the return value in instance.__dict__ so that
subsequent attribute access on the instance returns the cached value
instead of calling cached_property.__get__().
"""
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
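# Illustrative usage (editor's addition; ``Circle`` is a hypothetical class):
#
#     class Circle:
#         def __init__(self, radius):
#             self.radius = radius
#
#         @cached_property
#         def area(self):
#             return 3.14159 * self.radius ** 2
#
#     c = Circle(2)
#     c.area  # computed once, then stored in c.__dict__['area']
#     c.area  # later access reads the instance attribute directly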
class Promise:
"""
Base class for the proxy class created in the closure of the lazy function.
It's used to recognize promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
    Turn any callable into a lazily evaluated callable. At least one result
    class or type is required so that the automatic forcing of the lazy
    evaluation code is triggered. Results are not memoized; the function is
    evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
def __repr__(self):
return repr(self.__cast())
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__:
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = str in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
cls.__str__ = cls.__text_cast
elif cls._delegate_bytes:
cls.__bytes__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode()
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_text:
return str(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def lazystr(text):
"""
Shortcut for the common case of a lazy callable that returns str.
"""
return lazy(str, str)(text)
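# Example (editor's addition): the proxy delays evaluation until the value is
# actually used as a string:
#
#     >>> greeting = lazystr('Hello')
#     >>> isinstance(greeting, Promise)
#     True
#     >>> str(greeting)
#     'Hello'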
def keep_lazy(*resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
if not resultclasses:
raise TypeError("You must pass at least one argument to keep_lazy().")
def decorator(func):
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
if any(isinstance(arg, Promise) for arg in itertools.chain(args, kwargs.values())):
return lazy_func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper
return decorator
def keep_lazy_text(func):
"""
A decorator for functions that accept lazy arguments and return text.
"""
return keep_lazy(str)(func)
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject:
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
# Note: if a subclass overrides __init__(), it will likely need to
# override __copy__() and __deepcopy__() as well.
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. We're going to have to initialize the wrapped
# object to successfully pickle it, so we might as well just pickle the
# wrapped object since they're supposed to act the same way.
#
# Unfortunately, if we try to simply act like the wrapped object, the ruse
# will break down when pickle gets our id(). Thus we end up with pickle
# thinking, in effect, that we are a distinct object from the wrapped
# object, but with the same __dict__. This can cause problems (see #25389).
#
# So instead, we define our own __reduce__ method and custom unpickler. We
# pickle the wrapped object as the unpickler's argument, so that pickle
# will pickle it normally, and then the unpickler simply returns its
# argument.
def __reduce__(self):
if self._wrapped is empty:
self._setup()
return (unpickle_lazyobject, (self._wrapped,))
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use type(self), not
# self.__class__, because the latter is proxied.
return type(self)()
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# List/Tuple/Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__iter__ = new_method_proxy(iter)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
def unpickle_lazyobject(wrapped):
"""
Used to unpickle lazy objects. Just return its argument, which will be the
wrapped object.
"""
return wrapped
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
super().__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<%s: %r>' % (type(self).__name__, repr_attr)
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use SimpleLazyObject, not
# self.__class__, because the latter is proxied.
return SimpleLazyObject(self._setupfunc)
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
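# Illustrative usage of SimpleLazyObject, not part of the original module
# (load_current_user is a hypothetical, expensive callable):
# >>> user = SimpleLazyObject(lambda: load_current_user())
# >>> user.username   # load_current_user() runs here, on first attribute access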
def partition(predicate, values):
"""
Split the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
|
58192665776d6c81e071a5832a6e04951bf3caeff028b096d9a035f20d51089a | import ipaddress
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Clean an IPv6 address string.
Raise ValidationError if the address is invalid.
Replace the longest continuous zero-sequence with "::", remove leading
zeroes, and make sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Return a compressed IPv6 address or the same value.
"""
try:
addr = ipaddress.IPv6Address(int(ipaddress.IPv6Address(ip_str)))
except ValueError:
raise ValidationError(error_message, code='invalid')
if unpack_ipv4 and addr.ipv4_mapped:
return str(addr.ipv4_mapped)
elif addr.ipv4_mapped:
return '::ffff:%s' % str(addr.ipv4_mapped)
return str(addr)
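# Illustrative examples, not part of the original module:
# >>> clean_ipv6_address('2001:0::0:01')
# '2001::1'
# >>> clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True)
# '10.10.10.10'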
def is_valid_ipv6_address(ip_str):
"""
Return whether or not the `ip_str` string is a valid IPv6 address.
"""
try:
ipaddress.IPv6Address(ip_str)
except ValueError:
return False
return True
|
95a7080e37338716f01b838dfca7c6d47e9534f3863a96d18675616de768d826 | def is_iterable(x):
"An implementation independent way of checking for iterables"
try:
iter(x)
except TypeError:
return False
else:
return True
|
8441023214f310ad94641abb489001b3cbfe2fafd023e54a691cfec4743ce66b | import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if isinstance(value, (str, bytes)):
query_val = value
else:
try:
iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, even when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = [
item if isinstance(item, bytes) else str(item)
for item in value
]
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
warnings.warn(
'cookie_date() is deprecated in favor of http_date(), which follows '
'the format of the latest RFC.',
RemovedInDjango30Warning, stacklevel=2,
)
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
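# Illustrative round trip, not part of the original module:
# >>> int_to_base36(66)
# '1u'
# >>> base36_to_int('1u')
# 66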
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
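# Illustrative round trip, not part of the original module:
# >>> urlsafe_base64_encode(b'some bytes')
# b'c29tZSBieXRlcw'
# >>> urlsafe_base64_decode('c29tZSBieXRlcw')
# b'some bytes'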
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
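# Illustrative examples, not part of the original module (hosts are assumed to
# already be lowercased, as Django's callers ensure):
# >>> is_same_domain('foo.example.com', '.example.com')
# True
# >>> is_same_domain('badexample.com', '.example.com')
# False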
def is_safe_url(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if nv[1] or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
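# Illustrative example, not part of the original module:
# >>> limited_parse_qsl('a=1&b=2&b=3', fields_limit=10)
# [('a', '1'), ('b', '2'), ('b', '3')]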
|
9bd696429cb7df006c7c1c7ac5415bc2533f60e74169a5d8e05975eeaebed93f | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
https://tools.ietf.org/html/rfc7231#section-7.1.4
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.encoding import force_bytes, iri_to_uri
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
from django.utils.log import log_response
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.get('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict(dictitem(el) for el in cc)
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join(dictvalue(el) for el in cc.items())
response['Cache-Control'] = cc
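# Illustrative usage, not part of the original module:
# >>> response = HttpResponse()
# >>> patch_cache_control(response, max_age=3600, public=True)
# >>> response['Cache-Control']
# 'max-age=3600, public'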
def get_max_age(response):
"""
Return the max-age from the response Cache-Control header as an integer,
or None if it wasn't found or wasn't an integer.
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control']))
try:
return int(cc['max-age'])
except (ValueError, TypeError, KeyError):
pass
def set_response_etag(response):
if not response.streaming:
response['ETag'] = quote_etag(hashlib.md5(response.content).hexdigest())
return response
def _precondition_failed(request):
response = HttpResponse(status=412)
log_response(
'Precondition Failed: %s', request.path,
response=response,
request=request,
)
return response
def _not_modified(request, response=None):
new_response = HttpResponseNotModified()
if response:
# Preserve the headers required by Section 4.1 of RFC 7232, as well as
# Last-Modified.
for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'):
if header in response:
new_response[header] = response[header]
# Preserve cookies as per the cookie specification: "If a proxy server
# receives a response which contains a Set-cookie header, it should
# propagate the Set-cookie header to the client, regardless of whether
        # the response was 304 (Not Modified) or 200 (OK)."
# https://curl.haxx.se/rfc/cookie_spec.html
new_response.cookies = response.cookies
return new_response
def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Only return conditional responses on successful requests.
if response and not (200 <= response.status_code < 300):
return response
# Get HTTP request headers.
if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if_unmodified_since = if_unmodified_since and parse_http_date_safe(if_unmodified_since)
if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
# Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
if if_match_etags and not _if_match_passes(etag, if_match_etags):
return _precondition_failed(request)
# Step 2: Test the If-Unmodified-Since precondition.
if (not if_match_etags and if_unmodified_since and
not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
return _precondition_failed(request)
# Step 3: Test the If-None-Match precondition.
if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
else:
return _precondition_failed(request)
# Step 4: Test the If-Modified-Since precondition.
if (not if_none_match_etags and if_modified_since and
not _if_modified_since_passes(last_modified, if_modified_since)):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
# Step 5: Test the If-Range precondition (not supported).
# Step 6: Return original response since there isn't a conditional response.
return response
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith('W/'):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in section 3.4 of
RFC 7232.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip('W/')
etags = (etag.strip('W/') for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
"""
Add HTTP caching headers to the given HttpResponse: Expires and
Cache-Control.
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Add headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
def patch_vary_headers(response, newheaders):
"""
Add (or update) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = {header.lower() for header in vary_headers}
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
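# Illustrative usage, not part of the original module:
# >>> response = HttpResponse()
# >>> response['Vary'] = 'Cookie'
# >>> patch_vary_headers(response, ['Accept-Language'])
# >>> response['Vary']
# 'Cookie, Accept-Language'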
def has_vary_header(response, header_query):
"""
Check to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = {header.lower() for header in vary_headers}
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, add the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
cache_key += '.%s' % get_current_timezone_name()
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Return a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header)
if value is not None:
ctx.update(force_bytes(value))
url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, url.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Return a cache key for the header cache."""
url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, url.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Return a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there isn't a headerlist stored, return None, indicating that the page
needs to be rebuilt.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learn what headers to take into account for some request URL from the
response object. Store those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
71b70d0fb05145c037156f4e5ca47465455c3e2a786c7cc33035e302ad5bae8a | import datetime
import decimal
import unicodedata
from importlib import import_module
from django.conf import settings
from django.utils import dateformat, datetime_safe, numberformat
from django.utils.functional import lazy
from django.utils.translation import (
check_for_language, get_language, to_locale,
)
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
'DATE_INPUT_FORMATS': ['%Y-%m-%d'],
'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'],
'DATETIME_INPUT_FORMATS': [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d'
],
}
FORMAT_SETTINGS = frozenset([
'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR',
'NUMBER_GROUPING',
'FIRST_DAY_OF_WEEK',
'MONTH_DAY_FORMAT',
'TIME_FORMAT',
'DATE_FORMAT',
'DATETIME_FORMAT',
'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT',
'YEAR_MONTH_FORMAT',
'DATE_INPUT_FORMATS',
'TIME_INPUT_FORMATS',
'DATETIME_INPUT_FORMATS',
])
def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang, format_module_path=None):
"""Find format modules."""
if not check_for_language(lang):
return
if format_module_path is None:
format_module_path = settings.FORMAT_MODULE_PATH
format_locations = []
if format_module_path:
if isinstance(format_module_path, str):
format_module_path = [format_module_path]
for path in format_module_path:
format_locations.append(path + '.%s')
format_locations.append('django.conf.locale.%s')
locale = to_locale(lang)
locales = [locale]
if '_' in locale:
locales.append(locale.split('_')[0])
for location in format_locations:
for loc in locales:
try:
yield import_module('%s.formats' % (location % loc))
except ImportError:
pass
def get_format_modules(lang=None, reverse=False):
"""Return a list of the format modules found."""
if lang is None:
lang = get_language()
if lang not in _format_modules_cache:
_format_modules_cache[lang] = list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))
modules = _format_modules_cache[lang]
if reverse:
return list(reversed(modules))
return modules
def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, return the format for the current
language (locale). Default to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
use_l10n = use_l10n or (use_l10n is None and settings.USE_L10N)
if use_l10n and lang is None:
lang = get_language()
cache_key = (format_type, lang)
try:
return _format_cache[cache_key]
except KeyError:
pass
# The requested format_type has not been cached yet. Try to find it in any
# of the format_modules for the given lang if l10n is enabled. If it's not
# there or if l10n is disabled, fall back to the project settings.
val = None
if use_l10n:
for module in get_format_modules(lang):
val = getattr(module, format_type, None)
if val is not None:
break
if val is None:
if format_type not in FORMAT_SETTINGS:
return format_type
val = getattr(settings, format_type)
elif format_type in ISO_INPUT_FORMATS:
# If a list of input formats from one of the format_modules was
# retrieved, make sure the ISO_INPUT_FORMATS are in this list.
val = list(val)
for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
if iso_input not in val:
val.append(iso_input)
_format_cache[cache_key] = val
return val
get_format_lazy = lazy(get_format, str, list, tuple)
def date_format(value, format=None, use_l10n=None):
"""
Format a datetime.date or datetime.datetime object using a
localizable format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n))
def time_format(value, format=None, use_l10n=None):
"""
Format a datetime.time object using a localizable format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
"""
Format a numeric value using localization settings.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n or (use_l10n is None and settings.USE_L10N):
lang = get_language()
else:
lang = None
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
decimal_pos,
get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
force_grouping=force_grouping
)
def localize(value, use_l10n=None):
"""
Check if value is a localizable type (date, number...) and return it
formatted as a string using current locale format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Make sure booleans don't get treated as numbers
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
return value
def localize_input(value, default=None):
"""
Check if an input value is a localizable type and return it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Don't treat booleans as numbers.
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
return number_format(value)
elif isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = default or get_format('DATETIME_INPUT_FORMATS')[0]
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = default or get_format('DATE_INPUT_FORMATS')[0]
return value.strftime(format)
elif isinstance(value, datetime.time):
format = default or get_format('TIME_INPUT_FORMATS')[0]
return value.strftime(format)
return value
def sanitize_separators(value):
"""
Sanitize a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if isinstance(value, str):
parts = []
decimal_separator = get_format('DECIMAL_SEPARATOR')
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
thousand_sep = get_format('THOUSAND_SEPARATOR')
if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3:
# Special case where we suspect a dot meant decimal separator (see #22171)
pass
else:
for replacement in {
thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:
value = value.replace(replacement, '')
parts.append(value)
value = '.'.join(reversed(parts))
return value
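# Illustrative example, not part of the original module; assumes a locale where
# ',' is DECIMAL_SEPARATOR, '.' is THOUSAND_SEPARATOR, and
# settings.USE_THOUSAND_SEPARATOR is True:
# >>> sanitize_separators('1.234,56')
# '1234.56'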
|
8ce7acd247d9a0820fc4f30d5a45cc27a0f7c777fde41e5c656297a37f1185c7 | """JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
import re
class Tok:
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
class Lexer:
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yield pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the Javascript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly
# lex correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
Tok("id", r"""
([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
'div': both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
# slash will mean regex
'reg': both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
def __init__(self):
super().__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
"""
Convert the Javascript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m.group(0)
if s == '"':
return r'\"'
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == 'regex':
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == 'string':
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == 'id':
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return ''.join(c)
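# Illustrative example, not part of the original module:
# >>> prepare_js_for_gettext("x = /abc/; s = 'hi';")
# 'x = "REGEX"; s = "hi";'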
|
098c66d49bb65f96e5d9f28ab18c8281e99676fe8b3e11220e4695dedff37f7d | import os
import tempfile
from os.path import abspath, dirname, join, normcase, sep
from django.core.exceptions import SuspiciousFileOperation
from django.utils.encoding import force_text
# For backwards-compatibility in Django 2.0
abspathu = abspath
def upath(path):
"""Always return a unicode path (did something for Python 2)."""
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2. Noop for Python 3.
"""
return path
def safe_join(base, *paths):
"""
Join one or more path components to the base path component intelligently.
Return a normalized, absolute version of the final path.
Raise ValueError if the final path isn't located inside of the base path
component.
"""
base = force_text(base)
paths = [force_text(p) for p in paths]
final_path = abspath(join(base, *paths))
base_path = abspath(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
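# Illustrative examples on a POSIX path layout, not part of the original module:
# >>> safe_join('/srv/media', 'photos', 'cat.jpg')
# '/srv/media/photos/cat.jpg'
# >>> safe_join('/srv/media', '../etc/passwd')   # escapes the base path
# Traceback (most recent call last):
#     ...
# SuspiciousFileOperation: ...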
def symlinks_supported():
"""
    Return whether or not creating symlinks is supported on the host platform
    and/or whether they are allowed to be created (e.g. on Windows it requires
    admin permissions).
"""
with tempfile.TemporaryDirectory() as temp_dir:
original_path = os.path.join(temp_dir, 'original')
symlink_path = os.path.join(temp_dir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError):
supported = False
return supported
|
26bcac6018c2ef6a2ad1604be84278e00f191228ff4bb42c8afa1142e8313c49 | """
termcolors.py
"""
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Return your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Return the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print(colorize('first line', fg='red', opts=('noreset',)))
print('this should be red too')
print(colorize('and so should this'))
print('this should not be red')
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.items():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = '%s\x1b[%sm' % (text or '', RESET)
return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
"""
Return a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print(bold_red('hello'))
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'SUCCESS': {},
'WARNING': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
'MIGRATE_HEADING': {},
'MIGRATE_LABEL': {},
},
DARK_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'yellow'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green'},
'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'yellow'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
},
LIGHT_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'blue'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
'HTTP_NOT_MODIFIED': {'fg': 'green'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'red'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
"""Parse a DJANGO_COLORS environment variable to produce the system palette
The general form of a palette definition is:
"palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
where:
palette is a named palette; one of 'light', 'dark', or 'nocolor'.
role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.
Specifying a named palette is the same as manually specifying the individual
definitions for each role. Any individual definitions following the palette
definition will augment the base palette definition.
Valid roles:
'error', 'success', 'warning', 'notice', 'sql_field', 'sql_coltype',
'sql_keyword', 'sql_table', 'http_info', 'http_success',
'http_redirect', 'http_not_modified', 'http_bad_request',
'http_not_found', 'http_server_error', 'migrate_heading',
'migrate_label'
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold', 'underscore', 'blink', 'reverse', 'conceal', 'noreset'
"""
if not config_string:
return PALETTES[DEFAULT_PALETTE]
# Split the color configuration into parts
parts = config_string.lower().split(';')
palette = PALETTES[NOCOLOR_PALETTE].copy()
for part in parts:
if part in PALETTES:
# A default palette has been specified
palette.update(PALETTES[part])
elif '=' in part:
# Process a palette defining string
definition = {}
# Break the definition into the role,
# plus the list of specific instructions.
# The role must be in upper case
role, instructions = part.split('=')
role = role.upper()
styles = instructions.split(',')
styles.reverse()
# The first instruction can contain a slash
# to break apart fg/bg.
colors = styles.pop().split('/')
colors.reverse()
fg = colors.pop()
if fg in color_names:
definition['fg'] = fg
if colors and colors[-1] in color_names:
definition['bg'] = colors[-1]
# All remaining instructions are options
opts = tuple(s for s in styles if s in opt_dict)
if opts:
definition['opts'] = opts
# The nocolor palette has all available roles.
# Use that palette as the basis for determining
# if the role is valid.
if role in PALETTES[NOCOLOR_PALETTE] and definition:
palette[role] = definition
# If there are no colors specified, return the empty palette.
if palette == PALETTES[NOCOLOR_PALETTE]:
return None
return palette
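# Illustrative example, not part of the original module (the string has the
# same form as the DJANGO_COLORS environment variable):
# >>> palette = parse_color_setting('light;error=yellow/blue,blink;notice=magenta')
# >>> palette['ERROR']
# {'fg': 'yellow', 'bg': 'blue', 'opts': ('blink',)}
# >>> palette['NOTICE']
# {'fg': 'magenta'}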
|
b79a274f6e833929f2e108bb8b3fdaf36339589c9dbcfa646fa8458c4d35d021 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However
keeping them together made the implementation of Multiple Template Engines
less disruptive.
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
# Django Template Language
# Public exceptions
from .base import VariableDoesNotExist # NOQA isort:skip
from .context import ContextPopException # NOQA isort:skip
from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip
# Template parts
from .base import ( # NOQA isort:skip
Context, Node, NodeList, Origin, RequestContext, Template, Variable,
)
# Library management
from .library import Library # NOQA isort:skip
__all__ += ('Template', 'Context', 'RequestContext')
|
fade5c4e7589af1b52dafca0f5c82ecf8ad7c654c19e2e17a0952ea8936094f2 | from . import engines
from .exceptions import TemplateDoesNotExist
def get_template(template_name, using=None):
"""
Load and return a template for the given name.
Raise TemplateDoesNotExist if no such template exists.
"""
chain = []
engines = _engine_list(using)
for engine in engines:
try:
return engine.get_template(template_name)
except TemplateDoesNotExist as e:
chain.append(e)
raise TemplateDoesNotExist(template_name, chain=chain)
def select_template(template_name_list, using=None):
"""
Load and return a template for one of the given names.
Try names in order and return the first template found.
Raise TemplateDoesNotExist if no such template exists.
"""
if isinstance(template_name_list, str):
raise TypeError(
'select_template() takes an iterable of template names but got a '
'string: %r. Use get_template() if you want to load a single '
'template by name.' % template_name_list
)
chain = []
engines = _engine_list(using)
for template_name in template_name_list:
for engine in engines:
try:
return engine.get_template(template_name)
except TemplateDoesNotExist as e:
chain.append(e)
if template_name_list:
raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)
else:
raise TemplateDoesNotExist("No template names provided")
def render_to_string(template_name, context=None, request=None, using=None):
"""
Load a template and render it with a context. Return a string.
template_name may be a string or a list of strings.
"""
if isinstance(template_name, (list, tuple)):
template = select_template(template_name, using=using)
else:
template = get_template(template_name, using=using)
return template.render(context, request)
def _engine_list(using=None):
return engines.all() if using is None else [engines[using]]
|
6b077b07cd31845079ef82edd88748c870a90fa2861b2b8a1a42286a5afd75ea | """
This module contains generic exceptions used by template backends. Although
the Django template language also uses these exceptions internally for
historical reasons, exceptions specific to the DTL should not be added here.
"""
class TemplateDoesNotExist(Exception):
"""
The exception used when a template does not exist. Optional arguments:
backend
The template backend class used when raising this exception.
tried
A list of sources that were tried when finding the template. This
is formatted as a list of tuples containing (origin, status), where
origin is an Origin object or duck type and status is a string with the
reason the template wasn't found.
chain
A list of intermediate TemplateDoesNotExist exceptions. This is used to
encapsulate multiple exceptions when loading templates from multiple
engines.
"""
def __init__(self, msg, tried=None, backend=None, chain=None):
self.backend = backend
if tried is None:
tried = []
self.tried = tried
if chain is None:
chain = []
self.chain = chain
super().__init__(msg)
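
# Illustrative usage sketch (not part of Django): callers typically catch this
# exception and inspect ``chain``/``tried`` while debugging template lookup;
# 'missing.html' and the import below are placeholders for the example:
#
#     from django.template.loader import get_template
#
#     try:
#         get_template('missing.html')
#     except TemplateDoesNotExist as exc:
#         for sub in exc.chain:
#             print(sub.backend, sub.tried)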
class TemplateSyntaxError(Exception):
"""
The exception used for syntax errors during parsing or rendering.
"""
pass
|
741abdc10bd812f0ad1eaa5e2ba0e2c3ec9891c4296d50c878bdb69dc9082374 | from django.http import HttpResponse
from .loader import get_template, select_template
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
        # The content argument doesn't make sense here because it will be
        # replaced with the rendered template, so we always pass an empty
        # string to prevent errors and keep the signature short.
super().__init__('', content_type, status, charset=charset)
        # _is_rendered tracks whether the template and context have been baked
        # into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""
Raise an exception if trying to pickle an unrendered response. Pickle
only rendered data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
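
    # Illustrative sketch (not part of Django): pickling only works once the
    # response has been rendered; ``response`` is a placeholder instance:
    #
    #     response.render()
    #     data = pickle.dumps(response)  # unrendered -> ContentNotRenderedError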
def resolve_template(self, template):
"""Accept a template object, path-to-template, or list of paths."""
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, str):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Return the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context, self._request)
return content
def add_post_render_callback(self, callback):
"""Add a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
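
    # Illustrative sketch (not part of Django): a callback receives the
    # rendered response and may return a replacement; ``log_size`` and
    # ``response`` are placeholders:
    #
    #     def log_size(response):
    #         print(len(response.content))
    #
    #     response.add_post_render_callback(log_size)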
def render(self):
"""Render (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Return the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be iterated over.'
)
return super().__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError(
'The response content must be rendered before it can be accessed.'
)
return super().content
@content.setter
def content(self, value):
"""Set the content for the response."""
HttpResponse.content.fset(self, value)
self._is_rendered = True
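
# Illustrative usage sketch (not part of Django): a view can return an
# unrendered SimpleTemplateResponse and let middleware or post-render
# callbacks adjust it before rendering; 'hello.html' is a made-up name:
#
#     def my_view(request):
#         return SimpleTemplateResponse('hello.html', {'name': 'world'})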
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request']
def __init__(self, request, template, context=None, content_type=None,
status=None, charset=None, using=None):
super().__init__(template, context, content_type, status, charset, using)
self._request = request
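
# Illustrative usage sketch (not part of Django): the request-aware variant
# used by most function-based and class-based views; the template name and
# context below are made up:
#
#     def product_detail(request, pk):
#         return TemplateResponse(request, 'shop/detail.html', {'pk': pk})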
|