318924067fcfc9593e43b1c87742c377de47ae613e789ae4adbb12c85dcc88dd
"""Implements the Astropy TestRunner which is a thin wrapper around pytest."""
import copy
import glob
import inspect
import os
import shlex
import sys
import tempfile
import warnings
from collections import OrderedDict
from functools import wraps
from importlib.util import find_spec
from astropy.config.paths import set_temp_cache, set_temp_config
from astropy.utils import find_current_module
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
__all__ = ["TestRunner", "TestRunnerBase", "keyword"]
class keyword:
"""
A decorator to mark a method as keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
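# Illustrative sketch (not part of the original module): the wrapper keeps the
# name "keyword" and carries the metadata that ``TestRunnerBase.__new__`` below
# inspects; ``spam`` and its default value are hypothetical.
# >>> @keyword(default_value='eggs', priority=5)
# ... def spam(self, spam, kwargs):
# ...     return [f'--spam={spam}']
# >>> spam.__name__, spam._default_value, spam._priority
# ('keyword', 'eggs', 5)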
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
:class:`~astropy.tests.runner.keyword` decorator; they are used to
construct allowed keyword arguments to the
`~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
customization of individual keyword arguments (and associated logic)
without having to re-implement the whole
`~astropy.tests.runner.TestRunnerBase.run_tests` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
@keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
# Filter out anything that's not got the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == "keyword", functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += " " * 8
doc_keywords += func.__doc__.strip()
doc_keywords += "\n\n"
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super().__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError(
f"run_tests() got an unexpected keyword argument {keyword}"
)
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError(f"{keyword} keyword method must return a list")
args += result
return args
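# Hedged usage sketch (``MyRunner`` and its ``spam`` keyword are hypothetical,
# as in the class docstring above): the defaults collected in ``cls.keywords``
# are merged with the passed overrides, and each keyword method contributes a
# list of pytest command-line arguments.
# >>> runner = MyRunner('.')                    # doctest: +SKIP
# >>> runner._generate_args(spam='ham')         # doctest: +SKIP
# ['--spam=ham']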
RUN_TESTS_DOCSTRING = """
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
_required_dependencies = [
"pytest",
"pytest_remotedata",
"pytest_doctestplus",
"pytest_astropy_header",
]
_missing_dependency_error = (
"Test dependencies are missing: {module}. You should install the "
"'pytest-astropy' package (you may need to update the package if you "
"have a previous version installed, e.g., "
"'pip install pytest-astropy --upgrade' or the equivalent with conda)."
)
@classmethod
def _has_test_dependencies(cls): # pragma: no cover
# Using the test runner will not work without these dependencies.
for module in cls._required_dependencies:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
raise RuntimeError(cls._missing_dependency_error.format(module=module))
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
# sys.path when running the tests. This is possible so that when
# running pytest, test dependencies installed via e.g.
# tests_require are available here. This is not an advertised option
# since it is only for internal use
if kwargs.pop("add_local_eggs_to_path", False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join(".eggs", "*.egg")):
sys.path.insert(0, egg)
self._has_test_dependencies() # pragma: no cover
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError(
f"run_tests() got an unexpected keyword argument {wrong_kwargs[0]}"
)
args = self._generate_args(**kwargs)
if kwargs.get("plugins", None) is not None:
plugins = kwargs.pop("plugins")
elif self.keywords.get("plugins", None) is not None:
plugins = self.keywords["plugins"]
else:
plugins = []
# Override the config locations to not make a new directory nor use
# existing cache or config. Note that we need to do this here in
# addition to in conftest.py - for users running tests interactively
# in e.g. IPython, conftest.py would get read in too late, so we need
# to do it here - but at the same time the code here doesn't work when
# running tests in parallel mode because this uses subprocesses which
# don't know about the temporary config/cache.
astropy_config = tempfile.mkdtemp("astropy_config")
astropy_cache = tempfile.mkdtemp("astropy_cache")
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to pytest are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=plugins)
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
`~astropy.tests.runner.TestRunner.run_tests`.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
"""
runner = cls(path)
@wraps(runner.run_tests, ("__doc__",))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
# A somewhat unusual hack, but delete the attached __wrapped__
# attribute--although this is normally used to tell if the function
# was wrapped with wraps, on some version of Python this is also
# used to determine the signature to display in help() which is
# not useful in this case. We don't really care in this case if the
# function was wrapped either
if hasattr(test, "__wrapped__"):
del test.__wrapped__
test.__test__ = False
return test
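# Usage sketch of the factory (mirrors how ``astropy.test()`` is built; the
# calling module and path are hypothetical):
# >>> test = TestRunner.make_test_runner_in(os.path.dirname(__file__))  # doctest: +SKIP
# >>> test(package='io.fits', verbose=True)                             # doctest: +SKIP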
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests.
"""
def packages_path(self, packages, base_path, error=None, warning=None):
"""
Generates the path for multiple packages.
Parameters
----------
packages : str
Comma separated string of packages.
base_path : str
Base path to the source code or documentation.
error : str
Error message to be raised as ``ValueError``. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No error is raised if `None`. (Default: `None`)
warning : str
Warning message to be issued. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No warning is issued if `None`. (Default: `None`)
Returns
-------
paths : list of str
List of strings of existing package paths.
"""
packages = packages.split(",")
paths = []
for package in packages:
path = os.path.join(base_path, package.replace(".", os.path.sep))
if not os.path.isdir(path):
info = {"name": package, "path": path}
if error is not None:
raise ValueError(error.format(**info))
if warning is not None:
warnings.warn(warning.format(**info))
else:
paths.append(path)
return paths
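# Worked sketch (paths are hypothetical):
# >>> runner = TestRunner('/src/astropy')                       # doctest: +SKIP
# >>> runner.packages_path('io.fits,utils', '/src/astropy')     # doctest: +SKIP
# ['/src/astropy/io/fits', '/src/astropy/utils']
# A missing package directory is skipped, after raising ``ValueError`` if
# ``error`` is given or emitting a warning if ``warning`` is given.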
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning,
)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
"""
if package is None:
self.package_path = [self.base_path]
else:
error_message = "package to test is not found: {name} (at path {path})."
self.package_path = self.packages_path(
package, self.base_path, error=error_message
)
if not kwargs["test_path"]:
return self.package_path
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs["package"], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in (".rst", ""):
if kwargs["docs_path"] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path specified."
)
abs_docs_path = os.path.abspath(kwargs["docs_path"])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path)
)
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append("--doctest-rst")
test_path = abs_test_path
# Check that the extensions are in the path and not at the end to
# support specifying the name of the test, i.e.
# test_quantity.py::test_unit
if not (
os.path.isdir(test_path) or (".py" in test_path or ".rst" in test_path)
):
raise ValueError(
"Test path must be a directory or a path to a .py or .rst file"
)
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith("win"))
return []
@keyword(default_value=[])
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
# Plugins are handled independently by `run_tests` so we define this
# keyword just for the docstring
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from pytest. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ["-v"]
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on pytest pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ["failed", "all"]:
return [f"--pastebin={pastebin}"]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value="none")
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
"""
if remote_data is True:
remote_data = "any"
elif remote_data is False:
remote_data = "none"
elif remote_data not in ("none", "astropy", "any"):
warnings.warn(
"The remote_data option should be one of "
f"none/astropy/any (found {remote_data}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.",
AstropyDeprecationWarning,
)
remote_data = "any"
return [f"--remote-data={remote_data}"]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # noqa: F401
except ImportError:
raise ImportError(
"PEP8 checking requires pytest-pep8 plugin: "
"https://pypi.org/project/pytest-pep8"
)
else:
return ["--pep8", "-k", "pep8"]
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ["--pdb"]
return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
number of CPUs. If parallel is ``'auto'``, it will use all
the cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa: F401
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package"
)
return ["-n", str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
paths = []
if docs_path is not None and not kwargs["skip_docs"]:
if kwargs["package"] is not None:
warning_message = (
"Can not test .rst docs for {name}, since "
"docs path ({path}) does not exist."
)
paths = self.packages_path(
kwargs["package"], docs_path, warning=warning_message
)
elif not kwargs["test_path"]:
paths = [docs_path]
if len(paths) and not kwargs["test_path"]:
paths.append("--doctest-rst")
return paths
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return [f"--repeat={repeat}"]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from astropy.table import Table # noqa: F401
return super().run_tests(**kwargs)
30666c09f3619177079b6a99aaf65bca8d056bb4edae82ec886201a7e133bd64
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
"""
import builtins
import numpy as np
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.shapes import NDArrayShapeMethods
from .function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
__all__ = ["Masked", "MaskedNDArray"]
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
The data for which a mask is to be added. The result will be a
subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the data class does not exist yet but is a subclass of any of the
registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type(
"Masked" + data_cls.__name__,
(data_cls, base_cls),
{},
data_cls=data_cls,
)
return masked_cls
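# Sketch of the on-the-fly generation for a subclass of a registered base
# class (``MyArray`` is hypothetical):
# >>> class MyArray(np.ndarray): pass                        # doctest: +SKIP
# >>> masked_cls = Masked(MyArray)                           # doctest: +SKIP
# >>> masked_cls.__name__, issubclass(masked_cls, MyArray)   # doctest: +SKIP
# ('MaskedMyArray', True)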
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, "mask", None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError("cannot handle np.ma.masked here.") from None
else:
data = data.data
return data, mask
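# Hedged sketch of the splitting (illustrative values):
# >>> arr = np.ma.MaskedArray([1., 2.], mask=[True, False])   # doctest: +SKIP
# >>> Masked._get_data_and_mask(arr)                           # doctest: +SKIP
# (array([1., 2.]), array([ True, False]))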
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (
tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks),
)
def _get_mask(self):
"""The mask.
If set, replace the original mask with whatever it is set to,
using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, "dtype", None)
mask_dtype = (
np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names
else np.dtype("?")
)
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
# Even if not copying use a view so that shape setting
# does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
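# Usage sketch for ``filled`` (illustrative values):
# >>> ma = Masked(np.array([1., 2., 3.]), mask=[False, True, False])  # doctest: +SKIP
# >>> ma.filled(-1.)                                                   # doctest: +SKIP
# array([ 1., -1.,  3.])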
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if "info" in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
out["data"] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = masked_array.mask
elif method == "null_value":
out["data"] = np.ma.MaskedArray(
masked_array.unmasked, mask=masked_array.mask
)
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault("mask", getattr(map["data"], "mask", False))
return self._parent_cls.from_unmasked(**map)
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault("__class__", data_cls.__module__ + "." + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
use the ``flat`` method in the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
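# Usage sketch for the ``.flat`` iterator (illustrative values):
# >>> m = Masked(np.arange(4.).reshape(2, 2), mask=[[False, True], [False, False]])  # doctest: +SKIP
# >>> item = m.flat[1]                                                                # doctest: +SKIP
# >>> item.unmasked, item.mask                                                        # doctest: +SKIP
# (array(1.), array(True))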
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if "__new__" not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if "info" not in cls.__dict__ and hasattr(cls._data_cls, "info"):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {"serialize_method"}
new_info = type(
cls.__name__ + "Info",
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names),
)
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (
isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray)
):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (
dtype.itemsize == self.dtype.itemsize
and (dtype.names is None or len(dtype.names) == len(self.dtype.names))
):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
"with a different number of fields or size."
)
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, "_mask", False))
if "info" in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
# __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if "could not broadcast" in exc.args[0]:
raise AttributeError(
"Incompatible shape for in-place modification. "
"Use `.reshape()` to make a copy with the desired "
"shape."
) from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method("__eq__")
_ne_simple = _comparison_method("__ne__")
__lt__ = _comparison_method("__lt__")
__le__ = _comparison_method("__le__")
__gt__ = _comparison_method("__gt__")
__ge__ = _comparison_method("__ge__")
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] == other[field] for field in self.dtype.names], axis=-1
)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] != other[field] for field in self.dtype.names], axis=-1
)
return result.any(axis=-1)
def _combine_masks(self, masks, out=None):
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy()
else:
np.copyto(out, masks[0])
return out
out = np.logical_or(masks[0], masks[1], out=out)
for mask in masks[2:]:
np.logical_or(out, mask, out=out)
return out
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop("out", None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError("cannot write to unmasked output")
elif out_mask is None:
out_mask = m
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if "axes" in kwargs:
raise NotImplementedError(
"Masked does not yet support gufunc calls with 'axes'."
)
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1
else np.logical_or.reduce(mask1)
)
mask = self._combine_masks(masks, out=out_mask)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(" ", "")
)
axis = kwargs.get("axis", -1)
keepdims = kwargs.get("keepdims", False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims
)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == "__call__":
# Regular ufunc call.
mask = self._combine_masks(masks, out=out_mask)
elif method == "outer":
# Must have two arguments; adjust masks as will be done for data.
assert len(masks) == 2
masks = [(m if m is not None else False) for m in masks]
mask = np.logical_or.outer(masks[0], masks[1], out=out_mask)
elif method in {"reduce", "accumulate"}:
# Reductions like np.add.reduce (sum).
if masks[0] is not None:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
# TODO: take care of 'out' too?
if method == "reduce":
axis = kwargs.get("axis", None)
keepdims = kwargs.get("keepdims", False)
where = kwargs.get("where", True)
mask = np.logical_or.reduce(
masks[0],
where=where,
axis=axis,
keepdims=keepdims,
out=out_mask,
)
if where is not True:
# Mask also whole rows that were not selected by where,
# so would have been left as unmasked above.
mask |= np.logical_and.reduce(
masks[0], where=where, axis=axis, keepdims=keepdims
)
else:
# Accumulate
axis = kwargs.get("axis", 0)
mask = np.logical_or.accumulate(masks[0], axis=axis, out=out_mask)
elif out is not None:
mask = False
else: # pragma: no cover
# Can only get here if neither input nor output was masked, but
# perhaps axis or where was masked (in NUMPY_LT_1_21 this is
# possible). We don't support this.
return NotImplemented
elif method in {"reduceat", "at"}: # pragma: no cover
# TODO: implement things like np.add.reduceat and np.add.at.
raise NotImplementedError(
"masked instances cannot yet deal with 'reduceat' or 'at'."
)
if out_unmasked is not None:
kwargs["out"] = out_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked) for t in types):
raise TypeError(
"the MaskedNDArray implementation cannot handle {} "
"with the given arguments.".format(function)
) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(
self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out)
)
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
"""
if "where" not in kwargs:
kwargs["where"] = ~self.mask
if initial_func is not None and "initial" not in kwargs:
kwargs["initial"] = initial_func(self.unmasked)
return kwargs
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, cannot override the call to diagonal inside trace, so
# duplicate implementation in numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmax)
)
def max(self, axis=None, out=None, **kwargs):
return super().max(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmin)
)
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError("cannot yet give output")
return self._apply("compress", condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply("repeat", repeats, axis=axis)
def choose(self, choices, out=None, mode="raise"):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
if NUMPY_LT_1_22:
def argmin(self, axis=None, out=None):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
else:
def argmin(self, axis=None, out=None, *, keepdims=False):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argmax(self, axis=None, out=None, *, keepdims=False):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str.
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError("Cannot specify order when the array has no fields.")
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
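# Worked sketch of argsort with a masked entry sorted to the end
# (illustrative values):
# >>> m = Masked(np.array([2., 0., 1.]), mask=[False, True, False])  # doctest: +SKIP
# >>> m.argsort()                                                     # doctest: +SKIP
# array([2, 0, 1])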
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
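# Clip sketch (illustrative values): a masked bound is ignored, so only the
# unmasked minimum of 2. is applied here.
# >>> m = Masked(np.array([0., 5., 10.]))                                # doctest: +SKIP
# >>> m.clip(Masked(2.), Masked(8., mask=True)).unmasked                 # doctest: +SKIP
# array([ 2.,  5., 10.])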
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype("f4")
is_float16_result = out is None
where = ~self.mask & where
result = self.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
n = np.add.reduce(where, axis=axis, keepdims=keepdims)
# catch the case when an axis is fully masked to prevent div by zero:
neq0 = n == 0
n += neq0
result /= n
# correct fully-masked slice results to what is expected for 0/0 division
result.unmasked[neq0] = np.nan
if is_float16_result:
result = result.astype(self.dtype)
return result
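# Worked sketch of the masked mean (illustrative values): only the unmasked
# 1. and 6. contribute, so the result is 3.5.
# >>> m = Masked(np.array([1., 2., 6.]), mask=[False, True, False])  # doctest: +SKIP
# >>> m.mean().unmasked                                               # doctest: +SKIP
# array(3.5)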
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
where_final = ~self.mask & where
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where_final
)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= n == 0
return result
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
result = self.var(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_or.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
def all(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_and.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
# obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError("can only get existing field from structured dtype.")
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError("can only set existing field from structured dtype.")
269ee2a914a14449c94ac559548613208a25862663a31a61fb9b4dd7d3538be6
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os
import os.path
import textwrap
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from astropy import __version__
from astropy.utils.diff import (
diff_values,
fixed_width_indent,
report_diff_values,
where_not_allclose,
)
from astropy.utils.misc import NOT_OVERWRITING_MSG
from .card import BLANK_CARD, Card
# HDUList is used in one of the doctests
from .hdu.hdulist import HDUList, fitsopen # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from .header import Header
from .util import path_like
__all__ = [
"FITSDiff",
"HDUDiff",
"HeaderDiff",
"ImageDataDiff",
"RawDataDiff",
"TableDataDiff",
]
# Column attributes of interest for comparison
_COL_ATTRS = [
("unit", "units"),
("null", "null values"),
("bscale", "bscales"),
("bzero", "bzeros"),
("disp", "display formats"),
("dim", "dimensions"),
]
class _BaseDiff:
"""
Base class for all FITS diff objects.
When instantiating a FITS diff object, the first two arguments are always
the two objects to diff (two FITS files, two FITS headers, etc.).
Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
The returned ``_BaseDiff`` instance has a number of attributes that describe
the results of the diff operation.
The most basic attribute, present on all ``_BaseDiff`` instances, is
``.identical`` which is `True` if the two objects being compared are
identical according to the diff method for objects of that type.
"""
def __init__(self, a, b):
"""
The ``_BaseDiff`` class does not implement a ``_diff`` method and
should not be instantiated directly. Instead instantiate the
appropriate subclass of ``_BaseDiff`` for the objects being compared
(for example, use `HeaderDiff` to compare two `Header` objects).
"""
self.a = a
self.b = b
# For internal use in report output
self._fileobj = None
self._indent = 0
self._diff()
def __bool__(self):
"""
A ``_BaseDiff`` object acts as `True` in a boolean context if the two
objects compared are different. Otherwise it acts as `False`.
"""
return not self.identical
@classmethod
def fromdiff(cls, other, a, b):
"""
Returns a new Diff object of a specific subclass from an existing diff
object, passing on the values for any arguments they share in common
(such as ignore_keywords).
For example::
>>> from astropy.io import fits
>>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
>>> headera, headerb = fits.Header(), fits.Header()
>>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
>>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
>>> list(hd.ignore_keywords)
['*']
"""
sig = signature(cls.__init__)
# The first 3 arguments of any Diff initializer are self, a, and b.
kwargs = {}
for arg in list(sig.parameters.keys())[3:]:
if hasattr(other, arg):
kwargs[arg] = getattr(other, arg)
return cls(a, b, **kwargs)
@property
def identical(self):
"""
`True` if all the ``.diff_*`` attributes on this diff instance are
empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
attribute, which contains a non-empty value if and only if some
difference was found between the two objects being compared.
"""
return not any(
getattr(self, attr) for attr in self.__dict__ if attr.startswith("diff_")
)
def report(self, fileobj=None, indent=0, overwrite=False):
"""
Generates a text report on the differences (if any) between two
objects, and either returns it as a string or writes it to a file-like
object.
Parameters
----------
fileobj : file-like, string, or None, optional
If `None`, this method returns the report as a string. Otherwise it
returns `None` and writes the report to the given file-like object
(which must have a ``.write()`` method at a minimum), or to a new
file at the path specified.
indent : int
The number of 4 space tabs to indent the report.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Returns
-------
report : str or None
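        For example, given any diff instance ``diff``, the report can be
        written to a file on disk (the output path below is a placeholder)::
            # "fitsdiff_report.txt" is an illustrative output path
            diff.report(fileobj="fitsdiff_report.txt", overwrite=True)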
"""
return_string = False
filepath = None
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
if os.path.exists(fileobj) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(fileobj))
else:
filepath = fileobj
fileobj = open(filepath, "w")
elif fileobj is None:
fileobj = io.StringIO()
return_string = True
self._fileobj = fileobj
self._indent = indent # This is used internally by _writeln
try:
self._report()
finally:
if filepath:
fileobj.close()
if return_string:
return fileobj.getvalue()
def _writeln(self, text):
self._fileobj.write(fixed_width_indent(text, self._indent) + "\n")
def _diff(self):
raise NotImplementedError
def _report(self):
raise NotImplementedError
class FITSDiff(_BaseDiff):
"""Diff two FITS files by filename, or two `HDUList` objects.
`FITSDiff` objects have the following diff attributes:
- ``diff_hdu_count``: If the FITS files being compared have different
numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
file.
- ``diff_hdus``: If any HDUs with the same index are different, this
contains a list of 2-tuples of the HDU index and the `HDUDiff` object
representing the differences between the two HDUs.
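    A minimal usage sketch (the filenames below are placeholders; any two FITS
    files or `HDUList` objects may be compared)::
        # "a.fits" and "b.fits" stand in for two existing FITS files
        diff = FITSDiff("a.fits", "b.fits", ignore_keywords=["DATE"])
        if not diff.identical:
            print(diff.report())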
"""
def __init__(
self,
a,
b,
ignore_hdus=[],
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object.
b : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object to
compare to the first file.
ignore_hdus : sequence, optional
HDU names to ignore when comparing two FITS files or HDU lists; the
presence of these HDUs and their contents are ignored. Wildcard
strings may also be included in the list.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
if isinstance(a, (str, os.PathLike)):
try:
a = fitsopen(a)
except Exception as exc:
raise OSError(f"error opening file a ({a})") from exc
close_a = True
else:
close_a = False
if isinstance(b, (str, os.PathLike)):
try:
b = fitsopen(b)
except Exception as exc:
raise OSError(f"error opening file b ({b})") from exc
close_b = True
else:
close_b = False
# Normalize keywords/fields to ignore to upper case
self.ignore_hdus = {k.upper() for k in ignore_hdus}
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
# Some hdu names may be pattern wildcards. Find them.
self.ignore_hdu_patterns = set()
for name in list(self.ignore_hdus):
if name != "*" and glob.has_magic(name):
self.ignore_hdus.remove(name)
self.ignore_hdu_patterns.add(name)
self.diff_hdu_count = ()
self.diff_hdus = []
try:
super().__init__(a, b)
finally:
if close_a:
a.close()
if close_b:
b.close()
def _diff(self):
if len(self.a) != len(self.b):
self.diff_hdu_count = (len(self.a), len(self.b))
# Record filenames for use later in _report
self.filenamea = self.a.filename()
if not self.filenamea:
self.filenamea = f"<{self.a.__class__.__name__} object at {id(self.a):#x}>"
self.filenameb = self.b.filename()
if not self.filenameb:
self.filenameb = f"<{self.b.__class__.__name__} object at {id(self.b):#x}>"
if self.ignore_hdus:
self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
if self.ignore_hdu_patterns:
a_names = [hdu.name for hdu in self.a]
b_names = [hdu.name for hdu in self.b]
for pattern in self.ignore_hdu_patterns:
a_ignored = fnmatch.filter(a_names, pattern)
self.a = HDUList([h for h in self.a if h.name not in a_ignored])
b_ignored = fnmatch.filter(b_names, pattern)
self.b = HDUList([h for h in self.b if h.name not in b_ignored])
# For now, just compare the extensions one by one in order.
# Might allow some more sophisticated types of diffing later.
# TODO: Somehow or another simplify the passing around of diff
# options--this will become important as the number of options grows
for idx in range(min(len(self.a), len(self.b))):
hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
if not hdu_diff.identical:
if (
self.a[idx].name == self.b[idx].name
and self.a[idx].ver == self.b[idx].ver
):
self.diff_hdus.append(
(idx, hdu_diff, self.a[idx].name, self.a[idx].ver)
)
else:
self.diff_hdus.append((idx, hdu_diff, "", self.a[idx].ver))
def _report(self):
wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
self._fileobj.write("\n")
self._writeln(f" fitsdiff: {__version__}")
self._writeln(f" a: {self.filenamea}\n b: {self.filenameb}")
if self.ignore_hdus:
ignore_hdus = " ".join(sorted(self.ignore_hdus))
self._writeln(" HDU(s) not to be compared:\n" + wrapper.fill(ignore_hdus))
if self.ignore_hdu_patterns:
ignore_hdu_patterns = " ".join(sorted(self.ignore_hdu_patterns))
self._writeln(
" HDU(s) not to be compared:\n" + wrapper.fill(ignore_hdu_patterns)
)
if self.ignore_keywords:
ignore_keywords = " ".join(sorted(self.ignore_keywords))
self._writeln(
" Keyword(s) not to be compared:\n" + wrapper.fill(ignore_keywords)
)
if self.ignore_comments:
ignore_comments = " ".join(sorted(self.ignore_comments))
self._writeln(
" Keyword(s) whose comments are not to be compared:\n"
+ wrapper.fill(ignore_comments)
)
if self.ignore_fields:
ignore_fields = " ".join(sorted(self.ignore_fields))
self._writeln(
" Table column(s) not to be compared:\n" + wrapper.fill(ignore_fields)
)
self._writeln(
f" Maximum number of different data values to be reported: {self.numdiffs}"
)
self._writeln(
f" Relative tolerance: {self.rtol}, Absolute tolerance: {self.atol}"
)
if self.diff_hdu_count:
self._fileobj.write("\n")
self._writeln("Files contain different numbers of HDUs:")
self._writeln(f" a: {self.diff_hdu_count[0]}")
self._writeln(f" b: {self.diff_hdu_count[1]}")
if not self.diff_hdus:
self._writeln("No differences found between common HDUs.")
return
elif not self.diff_hdus:
self._fileobj.write("\n")
self._writeln("No differences found.")
return
for idx, hdu_diff, extname, extver in self.diff_hdus:
# print out the extension heading
if idx == 0:
self._fileobj.write("\n")
self._writeln("Primary HDU:")
else:
self._fileobj.write("\n")
if extname:
self._writeln(f"Extension HDU {idx} ({extname}, {extver}):")
else:
self._writeln(f"Extension HDU {idx}:")
hdu_diff.report(self._fileobj, indent=self._indent + 1)
class HDUDiff(_BaseDiff):
"""
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data: image, table, or unknown).
`HDUDiff` objects have the following diff attributes:
- ``diff_extnames``: If the two HDUs have different EXTNAME values, this
contains a 2-tuple of the different extension names.
- ``diff_extvers``: If the two HDUS have different EXTVER values, this
contains a 2-tuple of the different extension versions.
- ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
contains a 2-tuple of the different extension levels.
- ``diff_extension_types``: If the two HDUs have different XTENSION values,
this contains a 2-tuple of the different extension types.
- ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
two HDUs. This will always contain an object--it may be determined
whether the headers are different through ``diff_headers.identical``.
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
`RawDataDiff` as appropriate for the data in the HDUs, and only if the
two HDUs have non-empty data of the same type (`RawDataDiff` is used for
HDUs containing non-empty data of an indeterminate type).
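    A minimal usage sketch (``hdul_a`` and ``hdul_b`` stand in for two
    already-opened `HDUList` objects)::
        # Compare the primary HDUs of the two (hypothetical) HDU lists
        diff = HDUDiff(hdul_a[0], hdul_b[0], ignore_keywords=["CHECKSUM"])
        if not diff.identical:
            print(diff.report())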
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
ignore_fields=[],
numdiffs=10,
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.rtol = rtol
self.atol = atol
self.numdiffs = numdiffs
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.diff_extnames = ()
self.diff_extvers = ()
self.diff_extlevels = ()
self.diff_extension_types = ()
self.diff_headers = None
self.diff_data = None
super().__init__(a, b)
def _diff(self):
if self.a.name != self.b.name:
self.diff_extnames = (self.a.name, self.b.name)
if self.a.ver != self.b.ver:
self.diff_extvers = (self.a.ver, self.b.ver)
if self.a.level != self.b.level:
self.diff_extlevels = (self.a.level, self.b.level)
if self.a.header.get("XTENSION") != self.b.header.get("XTENSION"):
self.diff_extension_types = (
self.a.header.get("XTENSION"),
self.b.header.get("XTENSION"),
)
self.diff_headers = HeaderDiff.fromdiff(
self, self.a.header.copy(), self.b.header.copy()
)
if self.a.data is None or self.b.data is None:
# TODO: Perhaps have some means of marking this case
pass
elif self.a.is_image and self.b.is_image:
self.diff_data = ImageDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif isinstance(self.a, _TableLikeHDU) and isinstance(self.b, _TableLikeHDU):
# TODO: Replace this if/when _BaseHDU grows a .is_table property
self.diff_data = TableDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
elif not self.diff_extension_types:
# Don't diff the data for unequal extension types that are not
# recognized image or table types
self.diff_data = RawDataDiff.fromdiff(self, self.a.data, self.b.data)
# Clean up references to (possibly) memmapped arrays so they can
# be closed by .close()
self.diff_data.a = None
self.diff_data.b = None
def _report(self):
if self.identical:
self._writeln(" No differences found.")
if self.diff_extension_types:
self._writeln(
" Extension types differ:\n a: {}\n b: {}".format(
*self.diff_extension_types
)
)
if self.diff_extnames:
self._writeln(
" Extension names differ:\n a: {}\n b: {}".format(*self.diff_extnames)
)
if self.diff_extvers:
self._writeln(
" Extension versions differ:\n a: {}\n b: {}".format(
*self.diff_extvers
)
)
if self.diff_extlevels:
self._writeln(
" Extension levels differ:\n a: {}\n b: {}".format(
*self.diff_extlevels
)
)
if not self.diff_headers.identical:
self._fileobj.write("\n")
self._writeln(" Headers contain differences:")
self.diff_headers.report(self._fileobj, indent=self._indent + 1)
if self.diff_data is not None and not self.diff_data.identical:
self._fileobj.write("\n")
self._writeln(" Data contains differences:")
self.diff_data.report(self._fileobj, indent=self._indent + 1)
class HeaderDiff(_BaseDiff):
"""
Diff two `Header` objects.
`HeaderDiff` objects have the following diff attributes:
- ``diff_keyword_count``: If the two headers contain a different number of
keywords, this contains a 2-tuple of the keyword count for each header.
- ``diff_keywords``: If either header contains one or more keywords that
don't appear at all in the other header, this contains a 2-tuple
consisting of a list of the keywords only appearing in header a, and a
list of the keywords only appearing in header b.
- ``diff_duplicate_keywords``: If a keyword appears in both headers at
least once, but contains a different number of duplicates (for example, a
different number of HISTORY cards in each header), an item is added to
this dict with the keyword as the key, and a 2-tuple of the different
counts of that keyword as the value. For example::
{'HISTORY': (20, 19)}
means that header a contains 20 HISTORY cards, while header b contains
only 19 HISTORY cards.
- ``diff_keyword_values``: If any of the common keyword between the two
headers have different values, they appear in this dict. It has a
structure similar to ``diff_duplicate_keywords``, with the keyword as the
key, and a 2-tuple of the different values as the value. For example::
{'NAXIS': (2, 3)}
means that the NAXIS keyword has a value of 2 in header a, and a value of
3 in header b. This excludes any keywords matched by the
``ignore_keywords`` list.
- ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
differences between keyword comments.
`HeaderDiff` objects also have a ``common_keywords`` attribute that lists
all keywords that appear in both headers.
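    A brief usage sketch (the header contents below are illustrative only)::
        # Two minimal headers that differ only in their NAXIS value
        ha = Header([("NAXIS", 2)])
        hb = Header([("NAXIS", 3)])
        diff = HeaderDiff(ha, hb)
        # diff.diff_keyword_values now maps 'NAXIS' to [(2, 3)]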
"""
def __init__(
self,
a,
b,
ignore_keywords=[],
ignore_comments=[],
rtol=0.0,
atol=0.0,
ignore_blanks=True,
ignore_blank_cards=True,
):
"""
Parameters
----------
a : `~astropy.io.fits.Header` or string or bytes
A header.
b : `~astropy.io.fits.Header` or string or bytes
A header to compare to the first header.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.rtol = rtol
self.atol = atol
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.ignore_keyword_patterns = set()
self.ignore_comment_patterns = set()
for keyword in list(self.ignore_keywords):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_keywords.remove(keyword)
self.ignore_keyword_patterns.add(keyword)
for keyword in list(self.ignore_comments):
keyword = keyword.upper()
if keyword != "*" and glob.has_magic(keyword):
self.ignore_comments.remove(keyword)
self.ignore_comment_patterns.add(keyword)
# Keywords appearing in each header
self.common_keywords = []
# Set to the number of keywords in each header if the counts differ
self.diff_keyword_count = ()
# Set if the keywords common to each header (excluding ignore_keywords)
# appear in different positions within the header
# TODO: Implement this
self.diff_keyword_positions = ()
# Keywords unique to each header (excluding keywords in
# ignore_keywords)
self.diff_keywords = ()
# Keywords that have different numbers of duplicates in each header
# (excluding keywords in ignore_keywords)
self.diff_duplicate_keywords = {}
# Keywords common to each header but having different values (excluding
# keywords in ignore_keywords)
self.diff_keyword_values = defaultdict(list)
# Keywords common to each header but having different comments
# (excluding keywords in ignore_keywords or in ignore_comments)
self.diff_keyword_comments = defaultdict(list)
if isinstance(a, str):
a = Header.fromstring(a)
if isinstance(b, str):
b = Header.fromstring(b)
if not (isinstance(a, Header) and isinstance(b, Header)):
raise TypeError(
"HeaderDiff can only diff astropy.io.fits.Header "
"objects or strings containing FITS headers."
)
super().__init__(a, b)
# TODO: This doesn't pay much attention to the *order* of the keywords,
# except in the case of duplicate keywords. The order should be checked
# too, or at least it should be an option.
def _diff(self):
if self.ignore_blank_cards:
cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
else:
cardsa = list(self.a.cards)
cardsb = list(self.b.cards)
# build dictionaries of keyword values and comments
def get_header_values_comments(cards):
values = {}
comments = {}
for card in cards:
value = card.value
if self.ignore_blanks and isinstance(value, str):
value = value.rstrip()
values.setdefault(card.keyword, []).append(value)
comments.setdefault(card.keyword, []).append(card.comment)
return values, comments
valuesa, commentsa = get_header_values_comments(cardsa)
valuesb, commentsb = get_header_values_comments(cardsb)
# Normalize all keyword to upper-case for comparison's sake;
# TODO: HIERARCH keywords should be handled case-sensitively I think
keywordsa = {k.upper() for k in valuesa}
keywordsb = {k.upper() for k in valuesb}
self.common_keywords = sorted(keywordsa.intersection(keywordsb))
if len(cardsa) != len(cardsb):
self.diff_keyword_count = (len(cardsa), len(cardsb))
# Any other diff attributes should exclude ignored keywords
keywordsa = keywordsa.difference(self.ignore_keywords)
keywordsb = keywordsb.difference(self.ignore_keywords)
if self.ignore_keyword_patterns:
for pattern in self.ignore_keyword_patterns:
keywordsa = keywordsa.difference(fnmatch.filter(keywordsa, pattern))
keywordsb = keywordsb.difference(fnmatch.filter(keywordsb, pattern))
if "*" in self.ignore_keywords:
# Any other differences between keywords are to be ignored
return
left_only_keywords = sorted(keywordsa.difference(keywordsb))
right_only_keywords = sorted(keywordsb.difference(keywordsa))
if left_only_keywords or right_only_keywords:
self.diff_keywords = (left_only_keywords, right_only_keywords)
# Compare count of each common keyword
for keyword in self.common_keywords:
if keyword in self.ignore_keywords:
continue
if self.ignore_keyword_patterns:
skip = False
for pattern in self.ignore_keyword_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
counta = len(valuesa[keyword])
countb = len(valuesb[keyword])
if counta != countb:
self.diff_duplicate_keywords[keyword] = (counta, countb)
# Compare keywords' values and comments
for a, b in zip(valuesa[keyword], valuesb[keyword]):
if diff_values(a, b, rtol=self.rtol, atol=self.atol):
self.diff_keyword_values[keyword].append((a, b))
else:
# If there are duplicate keywords we need to be able to
# index each duplicate; if the values of a duplicate
# are identical use None here
self.diff_keyword_values[keyword].append(None)
if not any(self.diff_keyword_values[keyword]):
# No differences found; delete the array of Nones
del self.diff_keyword_values[keyword]
if "*" in self.ignore_comments or keyword in self.ignore_comments:
continue
if self.ignore_comment_patterns:
skip = False
for pattern in self.ignore_comment_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
for a, b in zip(commentsa[keyword], commentsb[keyword]):
if diff_values(a, b):
self.diff_keyword_comments[keyword].append((a, b))
else:
self.diff_keyword_comments[keyword].append(None)
if not any(self.diff_keyword_comments[keyword]):
del self.diff_keyword_comments[keyword]
def _report(self):
if self.diff_keyword_count:
self._writeln(" Headers have different number of cards:")
self._writeln(f" a: {self.diff_keyword_count[0]}")
self._writeln(f" b: {self.diff_keyword_count[1]}")
if self.diff_keywords:
for keyword in self.diff_keywords[0]:
if keyword in Card._commentary_keywords:
val = self.a[keyword][0]
else:
val = self.a[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in a: {val!r}")
for keyword in self.diff_keywords[1]:
if keyword in Card._commentary_keywords:
val = self.b[keyword][0]
else:
val = self.b[keyword]
self._writeln(f" Extra keyword {keyword!r:8} in b: {val!r}")
if self.diff_duplicate_keywords:
for keyword, count in sorted(self.diff_duplicate_keywords.items()):
self._writeln(f" Inconsistent duplicates of keyword {keyword!r:8}:")
self._writeln(
" Occurs {} time(s) in a, {} times in (b)".format(*count)
)
if self.diff_keyword_values or self.diff_keyword_comments:
for keyword in self.common_keywords:
report_diff_keyword_attr(
self._fileobj,
"values",
self.diff_keyword_values,
keyword,
ind=self._indent,
)
report_diff_keyword_attr(
self._fileobj,
"comments",
self.diff_keyword_comments,
keyword,
ind=self._indent,
)
# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold. There are lots of possibilities here, but hold off
# for now until specific cases come up.
class ImageDataDiff(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
          [((0, 0), (1.1, 2.2))]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
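    A brief usage sketch (the arrays below are illustrative only)::
        # Two 10 x 10 float arrays that differ in a single pixel
        arr_a = np.zeros((10, 10))
        arr_b = arr_a.copy()
        arr_b[0, 0] = 1.0
        diff = ImageDataDiff(arr_a, arr_b)
        # diff.diff_total is 1 and diff.diff_ratio is 0.01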
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixels;
        # self.diff_total stores the total count of differences between
        # the images, but not the differing values themselves
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (
np.issubdtype(self.a.dtype, np.inexact)
or np.issubdtype(self.b.dtype, np.inexact)
):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [
(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)
]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = " x ".join(str(d) for d in reversed(self.diff_dimensions[0]))
dimsb = " x ".join(str(d) for d in reversed(self.diff_dimensions[1]))
self._writeln(" Data dimensions differ:")
self._writeln(f" a: {dimsa}")
self._writeln(f" b: {dimsb}")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
index = [x + 1 for x in reversed(index)]
self._writeln(f" Data differs at {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different pixels found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class RawDataDiff(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
    `RawDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (
self.diff_dimensions[0][0],
self.diff_dimensions[1][0],
)
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(" Data sizes differ:")
self._writeln(f" a: {self.diff_dimensions[0]} bytes")
self._writeln(f" b: {self.diff_dimensions[1]} bytes")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(f" Data differs at byte {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
self._writeln(" ...")
self._writeln(
" {} different bytes found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
class TableDataDiff(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
      appear only in table a. The second element is a list of columns that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
          [(('TARGET', 0), ('NGC1001', 'NGC1002'))]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
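    A brief usage sketch (the column definitions below are illustrative;
    `Column` and `BinTableHDU` are the usual `astropy.io.fits` classes)::
        # Two single-column tables that differ only in their second row
        col_a = Column(name="TARGET", format="10A", array=["NGC1001", "NGC1002"])
        col_b = Column(name="TARGET", format="10A", array=["NGC1001", "NGC1003"])
        diff = TableDataDiff(
            BinTableHDU.from_columns([col_a]).data,
            BinTableHDU.from_columns([col_b]).data,
        )
        # diff.diff_values now holds one entry pairing the ('TARGET', 1)
        # index with the differing values ('NGC1002', 'NGC1003')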
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
        # Even if the numbers of columns differ, we still compare any common
        # columns
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if "*" in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
for fieldname in ignore_fields:
fieldname = fieldname.lower()
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(
colsa_set.intersection(colsb_set), key=operator.attrgetter("name")
)
self.common_column_names = {col.name.lower() for col in self.common_columns}
left_only_columns = {
col.name.lower(): col for col in colsa_set.difference(colsb_set)
}
right_only_columns = {
col.name.lower(): col for col in colsb_set.difference(colsa_set)
}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and failing
# that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb))
)
arra = self.a[col.name]
arrb = self.b[col.name]
if np.issubdtype(arra.dtype, np.floating) and np.issubdtype(
arrb.dtype, np.floating
):
diffs = where_not_allclose(arra, arrb, rtol=self.rtol, atol=self.atol)
elif "P" in col.format:
diffs = (
[
idx
for idx in range(len(arra))
if not np.allclose(
arra[idx], arrb[idx], rtol=self.rtol, atol=self.atol
)
],
)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx), (arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
def _report(self):
if self.diff_column_count:
self._writeln(" Tables have different number of columns:")
self._writeln(f" a: {self.diff_column_count[0]}")
self._writeln(f" b: {self.diff_column_count[1]}")
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in a")
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(f" Extra column {name} of format {format} in b")
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(f" Column {name} has different {col_attrs[attr]}:")
report_diff_values(
vals[0],
vals[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_rows:
self._writeln(" Table rows differ:")
self._writeln(f" a: {self.diff_rows[0]}")
self._writeln(f" b: {self.diff_rows[1]}")
self._writeln(" No further data comparison performed.")
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for indx, values in self.diff_values:
self._writeln(" Column {} data differs in row {}:".format(*indx))
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(
f" ...{self.diff_total - self.numdiffs} additional difference(s) found."
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
" {} different table data element(s) found ({:.2%} different).".format(
self.diff_total, self.diff_ratio
)
)
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ""
else:
dup = f"[{idx + 1}]"
fileobj.write(
fixed_width_indent(
f" Keyword {keyword:8}{dup} has different {attr}:\n",
ind,
)
)
report_diff_values(val[0], val[1], fileobj=fileobj, indent_width=ind + 1)
|
eedd6666bb2b6cbfa7163ca6b5d066489f31150ebd1f3a34d0b12497f457c238 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import warnings
import weakref
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import lazyproperty
from .column import (
_VLF,
ASCII2NUMPY,
ASCII2STR,
ASCIITNULL,
FITS2NUMPY,
ColDefs,
Delayed,
_AsciiColDefs,
_FormatP,
_FormatX,
_get_index,
_makep,
_unwrapx,
_wrapx,
)
from .util import _rstrip_inplace, decode_ascii, encode_ascii
class FITS_record:
"""
FITS record class.
`FITS_record` is used to access records of the `FITS_rec` object.
This will allow us to deal with scaled columns. It also handles
conversion/scaling of columns in ASCII tables. The `FITS_record`
class expects a `FITS_rec` object as input.
"""
def __init__(
self, input, row=0, start=None, end=None, step=None, base=None, **kwargs
):
"""
Parameters
----------
input : array
The array to wrap.
row : int, optional
The starting logical row of the array.
start : int, optional
The starting column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
end : int, optional
The ending column in the row associated with this object.
Used for subsetting the columns of the `FITS_rec` object.
"""
self.array = input
self.row = row
if base:
width = len(base)
else:
width = self.array._nfields
s = slice(start, end, step).indices(width)
self.start, self.end, self.step = s
self.base = base
def __getitem__(self, key):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
return type(self)(self.array, self.row, key.start, key.stop, key.step, self)
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
return self.array.field(indx)[self.row]
def __setitem__(self, key, value):
if isinstance(key, str):
indx = _get_index(self.array.names, key)
if indx < self.start or indx > self.end - 1:
raise KeyError(f"Key '{key}' does not exist.")
elif isinstance(key, slice):
            for indx in range(*key.indices(len(self))):
                indx = self._get_index(indx)
self.array.field(indx)[self.row] = value
else:
indx = self._get_index(key)
if indx > self.array._nfields - 1:
raise IndexError("Index out of bounds")
self.array.field(indx)[self.row] = value
def __len__(self):
return len(range(self.start, self.end, self.step))
def __repr__(self):
"""
Display a single row.
"""
outlist = []
for idx in range(len(self)):
outlist.append(repr(self[idx]))
return f"({', '.join(outlist)})"
def field(self, field):
"""
Get the field data of the record.
"""
return self.__getitem__(field)
def setfield(self, field, value):
"""
Set the field data of the record.
"""
self.__setitem__(field, value)
@lazyproperty
def _bases(self):
bases = [weakref.proxy(self)]
base = self.base
while base:
bases.append(base)
base = base.base
return bases
def _get_index(self, indx):
indices = np.ogrid[: self.array._nfields]
for base in reversed(self._bases):
if base.step < 1:
s = slice(base.start, None, base.step)
else:
s = slice(base.start, base.end, base.step)
indices = indices[s]
return indices[indx]
class FITS_rec(np.recarray):
"""
FITS record array class.
`FITS_rec` is the data part of a table HDU's data part. This is a layer
over the `~numpy.recarray`, so we can deal with scaled columns.
It inherits all of the standard methods from `numpy.ndarray`.
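    For example (assuming ``hdul`` is an already-opened `HDUList` whose first
    extension is a table with a column named ``FLUX``)::
        rec = hdul[1].data       # a FITS_rec instance
        flux = rec["FLUX"]       # column access, with any scaling applied
        first_row = rec[0]       # a single row, returned as a FITS_record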
"""
_record_type = FITS_record
_character_as_bytes = False
def __new__(subtype, input):
"""
Construct a FITS record array from a recarray.
"""
# input should be a record array
if input.dtype.subdtype is None:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data
)
else:
self = np.recarray.__new__(
subtype, input.shape, input.dtype, buf=input.data, strides=input.strides
)
self._init()
if self.dtype.fields:
self._nfields = len(self.dtype.fields)
return self
def __setstate__(self, state):
meta = state[-1]
column_state = state[-2]
state = state[:-2]
super().__setstate__(state)
self._col_weakrefs = weakref.WeakSet()
for attr, value in zip(meta, column_state):
setattr(self, attr, value)
def __reduce__(self):
"""
Return a 3-tuple for pickling a FITS_rec. Use the super-class
functionality but then add in a tuple of FITS_rec-specific
values that get used in __setstate__.
"""
reconst_func, reconst_func_args, state = super().__reduce__()
# Define FITS_rec-specific attrs that get added to state
column_state = []
meta = []
for attrs in [
"_converted",
"_heapoffset",
"_heapsize",
"_nfields",
"_gap",
"_uint",
"parnames",
"_coldefs",
]:
with suppress(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
                # pickled, so it needs to be deepcopied first
if attrs == "_coldefs":
column_state.append(self._coldefs.__deepcopy__(None))
else:
column_state.append(getattr(self, attrs))
meta.append(attrs)
state = state + (column_state, meta)
return reconst_func, reconst_func_args, state
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, FITS_rec):
self._character_as_bytes = obj._character_as_bytes
if isinstance(obj, FITS_rec) and obj.dtype == self.dtype:
self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._col_weakrefs = obj._col_weakrefs
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
elif self.dtype.fields is not None:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(self.dtype.fields)
self._converted = {}
self._heapoffset = getattr(obj, "_heapoffset", 0)
self._heapsize = getattr(obj, "_heapsize", 0)
self._gap = getattr(obj, "_gap", 0)
self._uint = getattr(obj, "_uint", False)
self._col_weakrefs = weakref.WeakSet()
self._coldefs = ColDefs(self)
# Work around chicken-egg problem. Column.array relies on the
# _coldefs attribute to set up ref back to parent FITS_rec; however
# in the above line the self._coldefs has not been assigned yet so
# this fails. This patches that up...
for col in self._coldefs:
del col.array
col._parent_fits_rec = weakref.ref(self)
else:
self._init()
def _init(self):
"""Initializes internal attributes specific to FITS-isms."""
self._nfields = 0
self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._col_weakrefs = weakref.WeakSet()
self._coldefs = None
self._gap = 0
self._uint = False
@classmethod
def from_columns(cls, columns, nrows=0, fill=False, character_as_bytes=False):
"""
Given a `ColDefs` object of unknown origin, initialize a new `FITS_rec`
object.
.. note::
This was originally part of the ``new_table`` function in the table
module but was moved into a class method since most of its
functionality always had more to do with initializing a `FITS_rec`
object than anything else, and much of it also overlapped with
``FITS_rec._scale_back``.
Parameters
----------
columns : sequence of `Column` or a `ColDefs`
The columns from which to create the table data. If these
columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns
will be used as a template for a new table with the requested
number of rows.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If
            `False`, copy the data from input; undefined cells will still
be filled with zeros/blanks.
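        A minimal sketch (the column definitions below are illustrative;
        `Column` is the usual `astropy.io.fits` class)::
            # Build an empty, zero/blank-filled three-row table from two
            # column templates
            c1 = Column(name="TARGET", format="20A")
            c2 = Column(name="FLUX", format="E")
            data = FITS_rec.from_columns([c1, c2], nrows=3, fill=True)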
"""
if not isinstance(columns, ColDefs):
columns = ColDefs(columns)
# read the delayed data
for column in columns:
arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
column.array = None
else:
column.array = _get_recarray_field(arr.hdu.data, arr.field)
# Reset columns._arrays (which we may want to just do away with
        # altogether)
del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
for arr in columns._arrays:
if arr is not None:
dim = arr.shape[0]
else:
dim = 0
if dim > nrows:
nrows = dim
raw_data = np.empty(columns.dtype.itemsize * nrows, dtype=np.uint8)
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
data._character_as_bytes = character_as_bytes
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
# .data._coldefs, so this assignment was circular! Don't make that
# mistake again.
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
# zeros/blanks
if fill:
return data
# Otherwise we have to fill the recarray with data from the input
# columns
for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
            # given in the call to this function, whichever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
arr = column.array
if arr is None:
array_size = 0
else:
array_size = len(arr)
n = min(array_size, nrows)
# TODO: At least *some* of this logic is mostly redundant with the
# _convert_foo methods in this class; see if we can eliminate some
# of that duplication.
if not n:
# The input column had an empty array, so just use the fill
# value
continue
field = _get_recarray_field(data, idx)
name = column.name
fitsformat = column.format
recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
if isinstance(recformat, _FormatX):
# Data is a bit array
if inarr.shape[-1] == recformat.repeat:
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
data._cache_field(name, _makep(inarr, field, recformat, nrows=nrows))
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
elif recformat[-2:] == FITS2NUMPY["L"] and inarr.dtype == bool:
# column is boolean
# The raw data field should be filled with either 'T' or 'F'
# (not 0). Use 'F' as a default
field[:] = ord("F")
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
converted = np.zeros(field.shape, dtype=bool)
converted[:n] = inarr
data._cache_field(name, converted)
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == np.False_, ord("F"), ord("T"))
elif columns[idx]._physical_values and columns[idx]._pseudo_unsigned_ints:
# Temporary hack...
bzero = column.bzero
converted = np.zeros(field.shape, dtype=inarr.dtype)
converted[:n] = inarr
data._cache_field(name, converted)
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
inarr = inarr - bzero
elif isinstance(columns, _AsciiColDefs):
# Regardless whether the format is character or numeric, if the
# input array contains characters then it's already in the raw
# format for ASCII tables
if fitsformat._pseudo_logical:
# Hack to support converting from 8-bit T/F characters
# Normally the column array is a chararray of 1 character
# strings, but we need to view it as a normal ndarray of
# 8-bit ints to fill it with ASCII codes for 'T' and 'F'
outarr = field.view(np.uint8, np.ndarray)[:n]
elif arr.dtype.kind not in ("S", "U"):
# Set up views of numeric columns with the appropriate
# numeric dtype
# Fill with the appropriate blanks for the column format
data._cache_field(name, np.zeros(nrows, dtype=arr.dtype))
outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
if (
inarr.dtype.kind == outarr.dtype.kind
and inarr.dtype.kind in ("U", "S")
and inarr.dtype != outarr.dtype
):
inarr_rowsize = inarr[0].size
inarr = inarr.flatten().view(outarr.dtype)
# This is a special case to handle input arrays with
# non-trivial TDIMn.
# By design each row of the outarray is 1-D, while each row of
# the input array may be n-D
if outarr.ndim > 1:
# The normal case where the first dimension is the rows
inarr_rowsize = inarr[0].size
inarr = inarr.reshape(n, inarr_rowsize)
outarr[:, :inarr_rowsize] = inarr
else:
# Special case for strings where the out array only has one
# dimension (the second dimension is rolled up into the
                # strings)
outarr[:n] = inarr.ravel()
else:
outarr[:] = inarr
# Now replace the original column array references with the new
# fields
# This is required to prevent the issue reported in
# https://github.com/spacetelescope/PyFITS/issues/99
for idx in range(len(columns)):
columns._arrays[idx] = data.field(idx)
return data
def __repr__(self):
# Force use of the normal ndarray repr (rather than the new
# one added for recarray in Numpy 1.10) for backwards compat
return np.ndarray.__repr__(self)
def __getattribute__(self, attr):
# First, see if ndarray has this attr, and return it if so. Note that
# this means a field with the same name as an ndarray attr cannot be
# accessed by attribute, this is Numpy's default behavior.
# We avoid using np.recarray.__getattribute__ here because after doing
# this check it would access the columns without doing the conversions
# that we need (with .field, see below).
try:
return object.__getattribute__(self, attr)
except AttributeError:
pass
# attr might still be a fieldname. If we have column definitions,
# we should access this via .field, as the data may have to be scaled.
if self._coldefs is not None and attr in self.columns.names:
return self.field(attr)
# If not, just let the usual np.recarray override deal with it.
return super().__getattribute__(attr)
def __getitem__(self, key):
if self._coldefs is None:
return super().__getitem__(key)
if isinstance(key, str):
return self.field(key)
# Have to view as a recarray then back as a FITS_rec, otherwise the
# circular reference fix/hack in FITS_rec.field() won't preserve
# the slice.
out = self.view(np.recarray)[key]
if type(out) is not np.recarray:
# Oops, we got a single element rather than a view. In that case,
# return a Record, which has no __getstate__ and is more efficient.
return self._record_type(self, key)
# We got a view; change it back to our class, and add stuff
out = out.view(type(self))
out._uint = self._uint
out._coldefs = ColDefs(self._coldefs)
arrays = []
out._converted = {}
for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
# Ensure that the sliced FITS_rec will view the same scaled
# columns as the original; this is one of the few cases where
# it is not necessary to use _cache_field()
if name in self._converted:
dummy = self._converted[name]
field = np.ndarray.__getitem__(dummy, key)
out._converted[name] = field
out._coldefs._arrays = arrays
return out
def __setitem__(self, key, value):
if self._coldefs is None:
return super().__setitem__(key, value)
if isinstance(key, str):
self[key][:] = value
return
if isinstance(key, slice):
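# Assign row by row; the effective range is clipped both to the length
# of this array and to the number of rows supplied in 'value'.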
end = min(len(self), key.stop or len(self))
end = max(0, end)
start = max(0, key.start or 0)
end = min(end, start + len(value))
for idx in range(start, end):
self.__setitem__(idx, value[idx - start])
return
if isinstance(value, FITS_record):
for idx in range(self._nfields):
self.field(self.names[idx])[key] = value.field(self.names[idx])
elif isinstance(value, (tuple, list, np.void)):
if self._nfields == len(value):
for idx in range(self._nfields):
self.field(idx)[key] = value[idx]
else:
raise ValueError(
f"Input tuple or list required to have {self._nfields} elements."
)
else:
raise TypeError(
"Assignment requires a FITS_record, tuple, or list as input."
)
def _ipython_key_completions_(self):
return self.names
def copy(self, order="C"):
"""
The Numpy documentation lies; `numpy.ndarray.copy` is not equivalent to
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
attributes (including ``._converted``!). So we need to make a deep
copy of all those attributes so that the two arrays truly do not share
any data.
"""
new = super().copy(order=order)
new.__dict__ = copy.deepcopy(self.__dict__)
return new
@property
def columns(self):
"""A user-visible accessor for the coldefs."""
return self._coldefs
@property
def _coldefs(self):
# This used to be a normal internal attribute, but it was changed to a
# property as a quick and transparent way to work around the reference
# leak bug fixed in https://github.com/astropy/astropy/pull/4539
#
# See the long comment in the Column.array property for more details
# on this. But in short, FITS_rec now has a ._col_weakrefs attribute
# which is a WeakSet of weakrefs to each Column in _coldefs.
#
# So whenever ._coldefs is set we also add each Column in the ColDefs
# to the weakrefs set. This is an easy way to find out if a Column has
# any references to it external to the FITS_rec (i.e. a user assigned a
# column to a variable). If the column is still in _col_weakrefs then
# there are other references to it external to this FITS_rec. We use
# that information in __del__ to save off copies of the array data
# for those columns to their Column.array property before our memory
# is freed.
return self.__dict__.get("_coldefs")
@_coldefs.setter
def _coldefs(self, cols):
self.__dict__["_coldefs"] = cols
if isinstance(cols, ColDefs):
for col in cols.columns:
self._col_weakrefs.add(col)
@_coldefs.deleter
def _coldefs(self):
try:
del self.__dict__["_coldefs"]
except KeyError as exc:
raise AttributeError(exc.args[0])
def __del__(self):
try:
del self._coldefs
if self.dtype.fields is not None:
for col in self._col_weakrefs:
if col.array is not None:
col.array = col.array.copy()
# See issues #4690 and #4912
except (AttributeError, TypeError): # pragma: no cover
pass
@property
def names(self):
"""List of column names."""
if self.dtype.fields:
return list(self.dtype.names)
elif getattr(self, "_coldefs", None) is not None:
return self._coldefs.names
else:
return None
@property
def formats(self):
"""List of column FITS formats."""
if getattr(self, "_coldefs", None) is not None:
return self._coldefs.formats
return None
@property
def _raw_itemsize(self):
"""
Returns the size of row items that would be written to the raw FITS
file, taking into account the possibility of unicode columns being
compactified.
Currently for internal use only.
"""
if _has_unicode_fields(self):
total_itemsize = 0
for field in self.dtype.fields.values():
itemsize = field[0].itemsize
if field[0].kind == "U":
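# Numpy 'U' (unicode) dtypes use 4 bytes per character in memory,
# whereas the FITS file stores 1 ASCII byte per character, hence the
# division by 4.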
itemsize = itemsize // 4
total_itemsize += itemsize
return total_itemsize
else:
# Just return the normal itemsize
return self.itemsize
def field(self, key):
"""
A view of a `Column`'s data as an array.
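
Illustrative usage (assuming ``data`` is a ``FITS_rec`` with a column
named ``'flux'``)::

    col = data.field('flux')   # the scaled/converted column array
    col = data['flux']         # equivalent, via __getitem__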
"""
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
column = self.columns[key]
name = column.name
format = column.format
if format.dtype.itemsize == 0:
warnings.warn(
"Field {!r} has a repeat count of 0 in its format code, "
"indicating an empty field.".format(key)
)
return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
# this can lead to a circular reference; see ticket #49
base = self
while isinstance(base, FITS_rec) and isinstance(base.base, np.recarray):
base = base.base
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
field = _get_recarray_field(base, name)
if name not in self._converted:
recformat = format.recformat
# TODO: If we're now passing the column to these subroutines, do we
# really need to pass them the recformat?
if isinstance(recformat, _FormatP):
# for P format
converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
converted = self._convert_other(column, field, recformat)
# Note: Never assign values directly into the self._converted dict;
# always go through self._cache_field; this way self._converted is
# only used to store arrays that are not already direct views of
# our own data.
self._cache_field(name, converted)
return converted
return self._converted[name]
def _cache_field(self, name, field):
"""
Do not store fields in _converted if one of its bases is self,
or if it has a common base with self.
Storing such a field would create a reference cycle that cannot be broken,
since ndarrays do not participate in cyclic garbage collection.
"""
base = field
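# Walk the .base chain of 'field' (outer loop) and, for each candidate
# base, the .base chain of 'self' (inner loop); if the two chains ever
# meet, 'field' is a view into our own buffer and caching it would
# create an uncollectable reference cycle, so return without storing.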
while True:
self_base = self
while True:
if self_base is base:
return
if getattr(self_base, "base", None) is not None:
self_base = self_base.base
else:
break
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self._converted[name] = field
def _update_column_attribute_changed(self, column, idx, attr, old_value, new_value):
"""
Update how the data is formatted depending on changes to column
attributes initiated by the user through the `Column` interface.
Dispatches column attribute change notifications to individual methods
for each attribute ``_update_column_<attr>``
"""
method_name = f"_update_column_{attr}"
if hasattr(self, method_name):
# Right now this is so we can be lazy and not implement updaters
# for every attribute yet--some we may not need at all, TBD
getattr(self, method_name)(column, idx, old_value, new_value)
def _update_column_name(self, column, idx, old_name, name):
"""Update the dtype field names when a column name is changed."""
dtype = self.dtype
# Updating the names on the dtype should suffice
dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
FITS X format.
"""
dummy = np.zeros(self.shape + (recformat.repeat,), dtype=np.bool_)
_unwrapx(field, dummy, recformat.repeat)
return dummy
def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
if column.dim:
vla_shape = tuple(
reversed(tuple(map(int, column.dim.strip("()").split(","))))
)
dummy = _VLF([None] * len(self), dtype=recformat.dtype)
raw_data = self._get_raw_data()
if raw_data is None:
raise OSError(
"Could not find heap data for the {!r} variable-length "
"array column.".format(column.name)
)
for idx in range(len(self)):
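# Each row of a P/Q column holds a two-element descriptor:
# field[idx, 0] is the number of elements in that row's array and
# field[idx, 1] is its byte offset relative to the start of the heap.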
offset = field[idx, 1] + self._heapoffset
count = field[idx, 0]
if recformat.dtype == "a":
dt = np.dtype(recformat.dtype + str(1))
arr_len = count * dt.itemsize
da = raw_data[offset : offset + arr_len].view(dt)
da = np.char.array(da.view(dtype=dt), itemsize=count)
dummy[idx] = decode_ascii(da)
else:
dt = np.dtype(recformat.dtype)
arr_len = count * dt.itemsize
dummy[idx] = raw_data[offset : offset + arr_len].view(dt)
if column.dim and len(vla_shape) > 1:
# The VLA is reshaped consistently with TDIM instructions
if vla_shape[0] == 1:
dummy[idx] = dummy[idx].reshape(1, len(dummy[idx]))
else:
vla_dim = vla_shape[1:]
vla_first = int(len(dummy[idx]) / np.prod(vla_dim))
dummy[idx] = dummy[idx].reshape((vla_first,) + vla_dim)
dummy[idx].dtype = dummy[idx].dtype.newbyteorder(">")
# Each array in the field may now require additional
# scaling depending on the other scaling parameters
# TODO: The same scaling parameters apply to every
# array in the column so this is currently very slow; we
# really only need to check once whether any scaling will
# be necessary and skip this step if not
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
dummy[idx] = self._convert_other(column, dummy[idx], recformat)
return dummy
def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
format = column.format
recformat = getattr(format, "recformat", ASCII2NUMPY[format[0]])
# if the string = TNULL, return ASCIITNULL
nullval = str(column.null).strip().encode("ascii")
if len(nullval) > format.width:
nullval = nullval[: format.width]
# Before using .replace, make sure that any trailing bytes in each
# column are filled with spaces, and *not*, say, nulls; otherwise
# functions like replace can leave gibberish bytes in the array
# buffer.
dummy = np.char.ljust(field, format.width)
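# FITS ASCII tables may use Fortran-style 'D' exponents; convert them
# to 'E' so that numpy can parse the values as ordinary floats.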
dummy = np.char.replace(dummy, encode_ascii("D"), encode_ascii("E"))
null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))
# Convert all fields equal to the TNULL value (nullval) to empty fields.
# TODO: These fields really should be converted to NaN or something else undefined.
# Currently they are converted to empty fields, which are then set to zero.
dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)
# always replace empty fields, see https://github.com/astropy/astropy/pull/5394
if nullval != b"":
dummy = np.where(np.char.strip(dummy) == b"", null_fill, dummy)
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
indx = self.names.index(column.name)
raise ValueError(
"{}; the header may be missing the necessary TNULL{} "
"keyword or the table contains invalid data".format(exc, indx + 1)
)
return dummy
def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
which case the original column array is returned.
"""
if isinstance(recformat, _FormatX):
# special handling for the X format
return self._convert_x(field, recformat)
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, dim = scale_factors
indx = self.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
# For now, check that these are ASCII columns by checking the coldefs
# type; in the future all columns (for binary tables, ASCII tables, or
# otherwise) should "know" what type they are already and how to handle
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
if dim:
# See if the dimensions already match, if not, make sure the
# number items will fit in the specified dimensions
if field.ndim > 1:
actual_shape = field.shape[1:]
if _str:
actual_shape = actual_shape + (field.itemsize,)
else:
actual_shape = field.shape[0]
if dim == actual_shape:
# The array already has the correct dimensions, so we
# ignore dim and don't convert
dim = None
else:
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
elif len(field.shape) == 1:
# No repeat count in TFORMn, equivalent to 1
actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems and not isinstance(recformat, _FormatP):
warnings.warn(
"TDIM{} value {:d} does not fit with the size of "
"the array items ({:d}). TDIM{:d} will be ignored.".format(
indx + 1, self._coldefs[indx].dims, actual_nitems, indx + 1
)
)
dim = None
# further conversion for both ASCII and binary tables
# For now we've made columns responsible for *knowing* whether their
# data has been scaled, but we make the FITS_rec class responsible for
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
if not column.ascii and column.format.p_format:
format_code = column.format.p_format
else:
# TODO: Rather than having this if/else it might be nice if the
# ColumnFormat class had an attribute guaranteed to give the format
# of actual values in a column regardless of whether the true
# format is something like P or Q
format_code = column.format.format
if _number and (_scale or _zero) and not column._physical_values:
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
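# FITS binary tables have no unsigned 16-, 32-, or 64-bit integer
# types; such columns are stored as signed integers with a TZERO
# offset of 2**15, 2**31, or 2**63 respectively, which is what is
# detected here.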
if bzero == 2**15 and format_code == "I":
field = np.array(field, dtype=np.uint16)
elif bzero == 2**31 and format_code == "J":
field = np.array(field, dtype=np.uint32)
elif bzero == 2**63 and format_code == "K":
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2**63)
else:
field = np.array(field, dtype=np.float64)
else:
field = np.array(field, dtype=np.float64)
if _scale:
np.multiply(field, bscale, field)
if _zero:
if self._uint and format_code == "K":
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
test_overflow += bzero64
except OverflowError:
warnings.warn(
"Overflow detected while applying TZERO{:d}. "
"Returning unscaled data.".format(indx + 1)
)
else:
field = test_overflow
else:
field += bzero
# mark the column as scaled
column._physical_values = True
elif _bool and field.dtype != bool:
field = np.equal(field, ord("T"))
elif _str:
if not self._character_as_bytes:
with suppress(UnicodeDecodeError):
field = decode_ascii(field)
if dim and not isinstance(recformat, _FormatP):
# Apply the new field item dimensions
nitems = reduce(operator.mul, dim)
if field.ndim > 1:
field = field[:, :nitems]
if _str:
fmt = field.dtype.char
dtype = (f"|{fmt}{dim[-1]}", dim[:-1])
field.dtype = dtype
else:
field.shape = (field.shape[0],) + dim
return field
def _get_heap_data(self):
"""
Returns a pointer into the table's raw data to its heap (if present).
This is returned as a numpy byte array.
"""
if self._heapsize:
raw_data = self._get_raw_data().view(np.ubyte)
heap_end = self._heapoffset + self._heapsize
return raw_data[self._heapoffset : heap_end]
else:
return np.array([], dtype=np.ubyte)
def _get_raw_data(self):
"""
Returns the base array of self, the "raw data array"; that is, the array
in the format in which it was first read from a file, before it was
sliced or viewed as a different type in any way.
This is determined by walking through the bases until finding one that
has at least the same number of bytes as self, plus the heapsize. This
may be the immediate .base but is not always. This is used primarily
for variable-length array support which needs to be able to find the
heap (the raw data *may* be larger than nbytes + heapsize if it
contains a gap or padding).
May return ``None`` if no array resembling the "raw data" according to
the stated criteria can be found.
"""
raw_data_bytes = self.nbytes + self._heapsize
base = self
while hasattr(base, "base") and base.base is not None:
base = base.base
# Variable-length-arrays: should take into account the case of
# empty arrays
if hasattr(base, "_heapoffset"):
if hasattr(base, "nbytes") and base.nbytes > raw_data_bytes:
return base
# non variable-length-arrays
else:
if hasattr(base, "nbytes") and base.nbytes >= raw_data_bytes:
return base
def _get_scale_factors(self, column):
"""Get all the scaling flags and factors for one column."""
# TODO: Maybe this should be a method/property on Column? Or maybe
# it's not really needed at all...
_str = column.format.format == "A"
_bool = column.format.format == "L"
_number = not (_bool or _str)
bscale = column.bscale
bzero = column.bzero
_scale = bscale not in ("", None, 1)
_zero = bzero not in ("", None, 0)
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
# column._dims gives a tuple, rather than column.dim which returns the
# original string format code from the FITS header...
dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
def _scale_back(self, update_heap_pointers=True):
"""
Update the parent array, using the (latest) scaled array.
If ``update_heap_pointers`` is `False`, this will leave all the heap
pointers in P/Q columns as they are verbatim--it only makes sense to do
this if there is already data on the heap and it can be guaranteed that
that data has not been modified, and there is not new data to add to
the heap. Currently this is only used as an optimization for
CompImageHDU that does its own handling of the heap.
"""
# Running total for the new heap size
heapsize = 0
for indx, name in enumerate(self.dtype.names):
column = self._coldefs[indx]
recformat = column.format.recformat
raw_field = _get_recarray_field(self, indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(recformat, _FormatP):
# Irritatingly, this can return a different dtype than just
# doing np.dtype(recformat.dtype); but this returns the results
# that we want. For example if recformat.dtype is 'a' we want
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
raw_field[:] = 0 # reset
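# Each descriptor is a (count, offset) pair: column 0 holds the number
# of elements in the row's array and column 1 the byte offset of that
# array from the start of the heap.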
npts = [np.prod(arr.shape) for arr in self._converted[name]]
raw_field[: len(npts), 0] = npts
raw_field[1:, 1] = (
np.add.accumulate(raw_field[:-1, 0]) * dtype.itemsize
)
raw_field[:, 1][:] += heapsize
heapsize += raw_field[:, 0].sum() * dtype.itemsize
# Even if this VLA has not been read or updated, we need to
# include the size of its constituent arrays in the heap size
# total
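# 'P' descriptors store the count and offset as 32-bit integers, so
# the heap cannot reach 2**31 bytes; the 64-bit 'Q' descriptors do
# not have this limitation.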
if heapsize >= 2**31:
raise ValueError(
"The heapsize limit for 'P' format has been reached. "
"Please consider using the 'Q' format for your file."
)
if isinstance(recformat, _FormatX) and name in self._converted:
_wrapx(self._converted[name], raw_field, recformat.repeat)
continue
scale_factors = self._get_scale_factors(column)
_str, _bool, _number, _scale, _zero, bscale, bzero, _ = scale_factors
field = self._converted.get(name, raw_field)
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero) and column._physical_values:
dummy = field.copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
# This will set the raw values in the recarray back to
# their non-physical storage values, so the column should
# be marked as not scaled
column._physical_values = False
elif _str or isinstance(self._coldefs, _AsciiColDefs):
dummy = field
else:
continue
# ASCII table, convert numbers to strings
if isinstance(self._coldefs, _AsciiColDefs):
self._scale_back_ascii(indx, dummy, raw_field)
# binary table string column
elif isinstance(raw_field, chararray.chararray):
self._scale_back_strings(indx, dummy, raw_field)
# all other binary table columns
else:
if len(raw_field) and isinstance(raw_field[0], np.integer):
dummy = np.around(dummy)
if raw_field.shape == dummy.shape:
raw_field[:] = dummy
else:
# Reshaping the data is necessary in cases where the
# TDIMn keyword was used to shape a column's entries
# into arrays
raw_field[:] = dummy.ravel().view(raw_field.dtype)
del dummy
# ASCII table does not have Boolean type
elif _bool and name in self._converted:
choices = (
np.array([ord("F")], dtype=np.int8)[0],
np.array([ord("T")], dtype=np.int8)[0],
)
raw_field[:] = np.choose(field, choices)
# Store the updated heapsize
self._heapsize = heapsize
def _scale_back_strings(self, col_idx, input_field, output_field):
# There are a few possibilities this has to be able to handle properly
# The input_field, which comes from the _converted column, is of dtype
# 'Un' so that elements read out of the array are normal str
# objects (i.e. unicode strings)
#
# At the other end the *output_field* may also be of type 'S' or of
# type 'U'. It will *usually* be of type 'S' because when reading
# an existing FITS table the raw data is just ASCII strings, and
# represented in Numpy as an S array. However, when a user creates
# a new table from scratch, they *might* pass in a column containing
# unicode strings (dtype 'U'). Therefore the output_field of the
# raw array is actually a unicode array. But we still want to make
# sure the data is encodable as ASCII. Later when we write out the
# array we use, in the dtype 'U' case, a different write routine
# that writes row by row and encodes any 'U' columns to ASCII.
# If the output_field is non-ASCII we will worry about ASCII encoding
# later when writing; otherwise we can do it right here
if input_field.dtype.kind == "U" and output_field.dtype.kind == "S":
try:
_ascii_encode(input_field, out=output_field)
except _UnicodeArrayEncodeError as exc:
raise ValueError(
"Could not save column '{}': Contains characters that "
"cannot be encoded as ASCII as required by FITS, starting "
"at the index {!r} of the column, and the index {} of "
"the string at that location.".format(
self._coldefs[col_idx].name,
exc.index[0] if len(exc.index) == 1 else exc.index,
exc.start,
)
)
else:
# Otherwise go ahead and do a direct copy into--if both are type
# 'U' we'll handle encoding later
input_field = input_field.flatten().view(output_field.dtype)
output_field.flat[:] = input_field
# Ensure that blanks at the end of each string are
# converted to nulls instead of spaces, see Trac #15
# and #111
_rstrip_inplace(output_field)
def _scale_back_ascii(self, col_idx, input_field, output_field):
"""
Convert internal array values back to ASCII table representation.
The ``input_field`` is the internal representation of the values, and
the ``output_field`` is the character array representing the ASCII
output that will be written.
"""
starts = self._coldefs.starts[:]
spans = self._coldefs.spans
format = self._coldefs[col_idx].format
# The index of the "end" column of the record, beyond
# which we can't write
end = super().field(-1).itemsize
starts.append(end + starts[-1])
if col_idx > 0:
lead = starts[col_idx] - starts[col_idx - 1] - spans[col_idx - 1]
else:
lead = 0
if lead < 0:
warnings.warn(
f"Column {col_idx + 1} starting point overlaps the previous column."
)
trail = starts[col_idx + 1] - starts[col_idx] - spans[col_idx]
if trail < 0:
warnings.warn(
f"Column {col_idx + 1} ending point overlaps the next column."
)
# TODO: It would be nice if these string column formatting
# details were left to a specialized class, as is the case
# with FormatX and FormatP
if "A" in format:
_pc = "{:"
else:
_pc = "{:>"
fmt = "".join([_pc, format[1:], ASCII2STR[format[0]], "}", (" " * trail)])
# Even if the format precision is 0, we should output a decimal point
# as long as there is space to do so--not including a decimal point in
# a float value is discouraged by the FITS Standard
trailing_decimal = format.precision == 0 and format.format in ("F", "E", "D")
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for jdx, value in enumerate(input_field):
value = fmt.format(value)
if len(value) > starts[col_idx + 1] - starts[col_idx]:
raise ValueError(
"Value {!r} does not fit into the output's itemsize of {}.".format(
value, spans[col_idx]
)
)
if trailing_decimal and value[0] == " ":
# We have some extra space in the field for the trailing
# decimal point
value = value[1:] + "."
output_field[jdx] = value
# Replace exponent separator in floating point numbers
if "D" in format:
output_field[:] = output_field.replace(b"E", b"D")
def tolist(self):
# Override .tolist to take care of special case of VLF
column_lists = [self[name].tolist() for name in self.columns.names]
return [list(row) for row in zip(*column_lists)]
def _get_recarray_field(array, key):
"""
Compatibility function for using the recarray base class's field method.
This incorporates the legacy functionality of returning string arrays as
Numeric-style chararray objects.
"""
# Numpy >= 1.10.dev recarray no longer returns chararrays for strings
# This is currently needed for backwards-compatibility and for
# automatic truncation of trailing whitespace
field = np.recarray.field(array, key)
if field.dtype.char in ("S", "U") and not isinstance(field, chararray.chararray):
field = field.view(chararray.chararray)
return field
class _UnicodeArrayEncodeError(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
If any strings are non-ASCII, a _UnicodeArrayEncodeError is raised--this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes)
|
5c89a2399b2ca5ecb00e92f13483ac823f1127f65219bd8c9bc199f49f831ca9 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from . import conf
from .util import _is_int, _str_to_num, _words_group, translate
from .verify import VerifyError, VerifyWarning, _ErrList, _Verify
__all__ = ["Card", "Undefined"]
FIX_FP_TABLE = str.maketrans("de", "DE")
FIX_FP_TABLE2 = str.maketrans("dD", "eE")
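# These tables normalize the exponent character in numeric values:
# FIX_FP_TABLE2 maps 'd'/'D' to 'e'/'E' so Python can parse the number
# (e.g. "1.0d+03" becomes "1.0e+03"), while FIX_FP_TABLE maps 'd'/'e'
# to 'D'/'E' when fixing a card image for output.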
CARD_LENGTH = 80
BLANK_CARD = " " * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = "= " # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = "=" # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows a lower-case 'd' or 'e' for the exponent, and allows spaces
# between the sign, digits, exponent sign, and exponent
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is non-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment may be an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = r"(?P<keyword>{}): +(?P<val>{})".format(
_rvkc_field_specifier_s, _numr_FSC
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
r"(?P<keyword>{})\.(?P<field_specifier>{})$".format(
_rvkc_identifier, _rvkc_field_specifier_s
)
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
# If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
"Keyword name {!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.".format(keyword),
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
f"Floating point {value!r} values are not allowed in FITS headers."
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
"characters; {!r} contains characters not representable in "
"ASCII or non-printable characters.".format(value)
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ""
return ""
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
f"ASCII characters; {comment!r} contains characters not "
"representable in ASCII or non-printable characters."
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
# Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError(
"The field-specifier may not be blank in record-valued keyword cards."
)
elif not self.field_specifier:
raise AttributeError(
"Cannot coerce cards to be record-valued keyword cards by "
"setting the field_specifier attribute"
)
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
# The keyword also needs to be updated
keyword = self._keyword.split(".", 1)[0]
self._keyword = ".".join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
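
For example (illustrative)::

    card = Card.fromstring("EXPTIME =                 10.0 / exposure time")
    card.keyword, card.value   # -> ('EXPTIME', 10.0)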
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode("latin1")
card._image = _pad(image)
card._verified = False
return card
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
keyword : str
A keyword value or a ``keyword.field-specifier`` value
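
Examples
--------
Illustrative usage::

    Card.normalize_keyword('naxis')       # -> 'NAXIS'
    Card.normalize_keyword('dp1.AXIS.1')  # -> 'DP1.AXIS.1'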
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return ".".join(
(match.group("keyword").strip().upper(), match.group("field_specifier"))
)
elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
intended as a RVKC) and the second as the card value; alternatively, the
first argument can be the base keyword and the second the
'field-specifier: value' string.
If the check passes the ._keyword, ._value, and .field_specifier
keywords are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(
match.group("keyword"), match.group("field_specifier"), None, value
)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(": ") > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(
keyword, match.group("keyword"), value, match.group("val")
)
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN :]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(": ") < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(
keyword,
match.group("keyword"),
match.group("rawval"),
match.group("val"),
)
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = ".".join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (
keyword_upper == "HIERARCH"
and self._image[8] == " "
and HIERARCH_VALUE_INDICATOR in self._image
):
# This is valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
"The following header keyword is invalid or follows an "
"unrecognized non-standard convention:\n{}".format(self._image),
AstropyUserWarning,
)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError(
f"Unparsable card ({self.keyword}), fix it first with .verify('fix')."
)
if m.group("bool") is not None:
value = m.group("bool") == "T"
elif m.group("strg") is not None:
value = re.sub("''", "'", m.group("strg"))
elif m.group("numr") is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group("numr"))
digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
if numr.group("sign") is None:
sign = ""
else:
sign = numr.group("sign")
value = _str_to_num(sign + digt)
elif m.group("cplx") is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
if real.group("sign") is None:
rsign = ""
else:
rsign = real.group("sign")
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
if imag.group("sign") is None:
isign = ""
else:
isign = imag.group("sign")
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group("valu")
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ""
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ""
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group("comm"):
comment = m.group("comm").rstrip()
elif "/" in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split("/", 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group("strg") or ""
value = value.rstrip().replace("''", "'")
if value and value[-1] == "&":
value = value[:-1]
values.append(value)
comment = m.group("comm")
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = "".join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(" ", 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != "HIERARCH ":
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = ".".join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split("/", 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group("numr") is not None:
numr = self._number_NFSC_RE.match(m.group("numr"))
value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
if numr.group("sign") is not None:
value = numr.group("sign") + value
elif m.group("cplx") is not None:
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
if real.group("sign") is not None:
rdigt = real.group("sign") + rdigt
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
if imag.group("sign") is not None:
idigt = imag.group("sign") + idigt
value = f"({rdigt}, {idigt})"
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
keyword = self.keyword.split(".", 1)[0]
return "{:{len}}".format(keyword, len=KEYWORD_LENGTH)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (
self._valuestring
and not self._valuemodified
and isinstance(self.value, float_types)
):
# Keep the existing formatting for float/complex numbers
value = f"{self._valuestring:>20}"
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ""
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ""
# put all parts together
output = "".join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
        # guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if keywordvalue_length > self.length and keyword.startswith("HIERARCH"):
if keywordvalue_length == self.length + 1 and keyword[-1] == " ":
output = "".join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError(
f"The header keyword {self.keyword!r} with its value is too long"
)
if len(output) <= self.length:
output = f"{output:80}"
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(value) > (self.length - 10):
output = self._format_long_image()
else:
warnings.warn(
"Card is too long, comment will be truncated.", VerifyWarning
)
output = output[: Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
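        # Maximum number of characters of value/comment text that fit on each
        # 80-character card once the keyword, value indicator, quotes and the
        # continuation '&' are accounted for.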
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = "{:{len}}= ".format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = "CONTINUE "
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f"{headstr + value:80}")
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f"{comment:80}")
return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
        will render the card as multiple consecutive commentary cards of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
def _verify(self, option="warn"):
errs = []
fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if self.keyword not in self._commentary_keywords and (
self._image
and self._image[:9].upper() != "HIERARCH "
and self._image.find("=") != 8
):
errs.append(
dict(
err_text=(
"Card {!r} is not FITS standard (equal sign not "
"at column 8).".format(self.keyword)
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the key, it is never fixable
        # always silently fix the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(
dict(
err_text=f"Card keyword {keyword!r} is not upper case.",
fix_text=fix_text,
fix=self._fix_keyword,
)
)
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split(".", 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(
dict(err_text=f"Illegal keyword name {keyword!r}", fixable=False)
)
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(
dict(
err_text=(
f"Unprintable string {valuecomment!r}; commentary "
"cards may only contain printable ASCII characters"
),
fixable=False,
)
)
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(
dict(
err_text=(
f"Card {self.keyword!r} is not FITS standard "
f"(invalid value string: {valuecomment!r})."
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group("comm")
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(
dict(
err_text=(
f"Unprintable string {comment!r}; header comments "
"may only contain printable ASCII characters"
),
fixable=False,
)
)
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
        normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx : idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
"Long card images must have CONTINUE cards after "
"the first card or have commentary keywords like "
"HISTORY or COMMENT."
)
if not isinstance(card.value, str):
raise VerifyError("CONTINUE cards must have string values.")
yield card
def _int_or_float(s):
"""
    Convert a string to an int if possible, otherwise to a float.
    If the value can be converted to neither an int nor a float, a
    ValueError is raised.
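    Examples
    --------
    >>> _int_or_float("42")
    42
    >>> _int_or_float("4.2E1")
    42.0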
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
    # a string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return ""
def _format_float(value):
"""Format a floating number to make sure it gets the decimal point."""
value_str = f"{value:.16G}"
if "." not in value_str and "E" not in value_str:
value_str += ".0"
elif "E" in value_str:
# On some Windows builds of Python (and possibly other platforms?) the
# exponent is zero-padded out to, it seems, three digits. Normalize
# the format to pad only to two digits.
significand, exponent = value_str.split("E")
if exponent[0] in ("+", "-"):
sign = exponent[0]
exponent = exponent[1:]
else:
sign = ""
value_str = f"{significand}E{sign}{int(exponent):02d}"
# Limit the value string to at most 20 characters.
str_len = len(value_str)
if str_len > 20:
idx = value_str.find("E")
if idx < 0:
value_str = value_str[:20]
else:
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen)
|
71211b4944f81282561cb61a6280f7b0dda42b656a057097a0fc60a315892d07 | # Licensed under a 3-clause BSD style license
import os
from setuptools import Extension
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
def get_extensions():
return [
Extension(
"astropy.io.fits._tiled_compression._compression",
sources=[
os.path.join(SRC_DIR, "compression.c"),
os.path.join(SRC_DIR, "unquantize.c"),
os.path.join("cextern", "cfitsio", "lib", "pliocomp.c"),
os.path.join("cextern", "cfitsio", "lib", "ricecomp.c"),
os.path.join("cextern", "cfitsio", "lib", "fits_hcompress.c"),
os.path.join("cextern", "cfitsio", "lib", "fits_hdecompress.c"),
os.path.join("cextern", "cfitsio", "lib", "quantize.c"),
],
include_dirs=[SRC_DIR],
)
]
|
264c4cad62facd7c4de96f3b01d965c5423c408ae246f6859d5bf0f4cd398646 | """
This module contains low level helper functions for compressing and
decompressing buffer for the Tiled Table Compression algorithms as specified in
the FITS 4 standard.
"""
import sys
import numpy as np
from astropy.io.fits.hdu.base import BITPIX2DTYPE
from .codecs import PLIO1, Gzip1, Gzip2, HCompress1, Rice1
from .quantization import DITHER_METHODS, QuantizationFailedException, Quantize
from .utils import _iter_array_tiles
ALGORITHMS = {
"GZIP_1": Gzip1,
"GZIP_2": Gzip2,
"RICE_1": Rice1,
"RICE_ONE": Rice1,
"PLIO_1": PLIO1,
"HCOMPRESS_1": HCompress1,
}
DEFAULT_ZBLANK = -2147483648
__all__ = ["compress_hdu", "decompress_hdu"]
def _decompress_tile(buf, *, algorithm: str, **settings):
"""
Decompress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The compressed buffer to be decompressed.
algorithm
A supported decompression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).decode(buf)
def _compress_tile(buf, *, algorithm: str, **settings):
"""
Compress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The decompressed buffer to be compressed.
algorithm
A supported compression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).encode(buf)
def _tile_shape(header):
return tuple(header[f"ZTILE{idx}"] for idx in range(header["ZNAXIS"], 0, -1))
def _data_shape(header):
return tuple(header[f"ZNAXIS{idx}"] for idx in range(header["ZNAXIS"], 0, -1))
def _header_to_settings(header, actual_tile_shape):
settings = {}
if header["ZCMPTYPE"] == "GZIP_2":
settings["itemsize"] = abs(header["ZBITPIX"]) // 8
elif header["ZCMPTYPE"] == "PLIO_1":
# We have to calculate the tilesize from the shape of the tile not the
# header, so that it's correct for edge tiles etc.
settings["tilesize"] = np.product(actual_tile_shape)
elif header["ZCMPTYPE"] in ("RICE_1", "RICE_ONE"):
settings["blocksize"] = _get_compression_setting(header, "BLOCKSIZE", 32)
settings["bytepix"] = _get_compression_setting(header, "BYTEPIX", 4)
settings["tilesize"] = np.product(actual_tile_shape)
elif header["ZCMPTYPE"] == "HCOMPRESS_1":
settings["bytepix"] = 8
settings["scale"] = int(_get_compression_setting(header, "SCALE", 0))
settings["smooth"] = _get_compression_setting(header, "SMOOTH", 0)
# HCOMPRESS requires 2D tiles, so to find the shape of the 2D tile we
# need to ignore all length 1 tile dimensions
# Also cfitsio expects the tile shape in C order
shape_2d = tuple(nd for nd in actual_tile_shape if nd != 1)
if len(shape_2d) != 2:
raise ValueError(f"HCOMPRESS expects two dimensional tiles, got {shape_2d}")
settings["nx"] = shape_2d[0]
settings["ny"] = shape_2d[1]
return settings
def _finalize_array(tile_buffer, *, bitpix, tile_shape, algorithm, lossless):
"""
Convert a buffer to an array.
This is a helper function which takes a raw buffer (as output by .decode)
and translates it into a numpy array with the correct dtype, endianness and
shape.
"""
if algorithm.startswith("GZIP"):
# This algorithm is taken from fitsio
# https://github.com/astropy/astropy/blob/a8cb1668d4835562b89c0d0b3448ac72ca44db63/cextern/cfitsio/lib/imcompress.c#L6345-L6388
tilelen = np.product(tile_shape)
tilebytesize = len(tile_buffer)
if tilebytesize == tilelen * 2:
dtype = ">i2"
elif tilebytesize == tilelen * 4:
if bitpix < 0 and lossless:
dtype = ">f4"
else:
dtype = ">i4"
elif tilebytesize == tilelen * 8:
if bitpix < 0 and lossless:
dtype = ">f8"
else:
dtype = ">i8"
else:
# Just return the raw bytes
dtype = ">u1"
tile_data = np.asarray(tile_buffer).view(dtype).reshape(tile_shape)
else:
# For RICE_1 compression the tiles that are on the edge can end up
# being padded, so we truncate excess values
if algorithm in ("RICE_1", "RICE_ONE", "PLIO_1"):
tile_buffer = tile_buffer[: np.product(tile_shape)]
if tile_buffer.data.format == "b":
# NOTE: this feels like a Numpy bug - need to investigate
tile_data = np.asarray(tile_buffer, dtype=np.uint8).reshape(tile_shape)
else:
tile_data = np.asarray(tile_buffer).reshape(tile_shape)
return tile_data
def _check_compressed_header(header):
# NOTE: this could potentially be moved up into CompImageHDU, e.g. in a
# _verify method.
# Check for overflows which might cause issues when calling C code
for kw in ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.intc).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["ZNAXIS"] + 1):
for kw_name in ["ZNAXIS", "ZTILE"]:
kw = f"{kw_name}{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for i in range(1, header["NAXIS"] + 1):
kw = f"NAXIS{i}"
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["TNULL1", "PCOUNT", "THEAP"]:
if kw in header:
if header[kw] > 0 and header[kw] > np.iinfo(np.int64).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
for kw in ["ZVAL3"]:
if kw in header:
if header[kw] > np.finfo(np.float32).max:
raise OverflowError(f"{kw} value {header[kw]} is too large")
# Validate data types
for kw in ["ZSCALE", "ZZERO", "TZERO1", "TSCAL1"]:
if kw in header:
if not np.isreal(header[kw]):
raise TypeError(f"{kw} should be floating-point")
for kw in ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"]:
if kw in header:
if not isinstance(header[kw], str):
raise TypeError(f"{kw} should be a string")
for kw in ["ZDITHER0"]:
if kw in header:
if not np.isreal(header[kw]) or not float(header[kw]).is_integer():
raise TypeError(f"{kw} should be an integer")
if "TFORM1" in header:
for valid in ["1PB", "1PI", "1PJ", "1QB", "1QI", "1QJ"]:
if header["TFORM1"].startswith(valid):
break
else:
raise RuntimeError(f"Invalid TFORM1: {header['TFORM1']}")
# Check values
for kw in ["TFIELDS", "PCOUNT"] + [
f"NAXIS{idx + 1}" for idx in range(header["NAXIS"])
]:
if kw in header:
if header[kw] < 0:
raise ValueError(f"{kw} should not be negative.")
for kw in ["ZNAXIS", "TFIELDS"]:
if kw in header:
if header[kw] < 0 or header[kw] > 999:
raise ValueError(f"{kw} should be in the range 0 to 999")
if header["ZBITPIX"] not in [8, 16, 32, 64, -32, -64]:
raise ValueError(f"Invalid value for BITPIX: {header['ZBITPIX']}")
if header["ZCMPTYPE"] not in ALGORITHMS:
raise ValueError(f"Unrecognized compression type: {header['ZCMPTYPE']}")
# Check that certain keys are present
header["ZNAXIS"]
header["ZBITPIX"]
def _get_compression_setting(header, name, default):
# Settings for the various compression algorithms are stored in pairs of
# keywords called ZNAME? and ZVAL? - a given compression setting could be
# in any ZNAME? so we need to check through all the possible ZNAMEs which
# one matches the required setting.
for i in range(1, 1000):
if f"ZNAME{i}" not in header:
break
if header[f"ZNAME{i}"].lower() == name.lower():
return header[f"ZVAL{i}"]
return default
def decompress_hdu(hdu):
"""
Decompress the data in a `~astropy.io.fits.CompImageHDU`.
Parameters
----------
hdu : `astropy.io.fits.CompImageHDU`
Input HDU to decompress the data for.
Returns
-------
data : `numpy.ndarray`
The decompressed data array.
"""
_check_compressed_header(hdu._header)
tile_shape = _tile_shape(hdu._header)
data_shape = _data_shape(hdu._header)
data = np.zeros(data_shape, dtype=BITPIX2DTYPE[hdu._header["ZBITPIX"]])
quantized = "ZSCALE" in hdu.compressed_data.dtype.names
if len(hdu.compressed_data) == 0:
return None
override_itemsize = None
for irow, tile_slices in enumerate(_iter_array_tiles(data_shape, tile_shape)):
row = hdu.compressed_data[irow]
# For tiles near the edge, the tile shape from the header might not be
# correct so we have to pass the shape manually.
actual_tile_shape = data[tile_slices].shape
settings = _header_to_settings(hdu._header, actual_tile_shape)
cdata = row["COMPRESSED_DATA"]
# When quantizing floating point data, sometimes the data will not
# quantize efficiently. In these cases the raw floating point data can
# be losslessly GZIP compressed and stored in the `GZIP_COMPRESSED_DATA`
# column.
gzip_fallback = len(cdata) == 0
if gzip_fallback:
tile_buffer = _decompress_tile(
row["GZIP_COMPRESSED_DATA"], algorithm="GZIP_1"
)
tile_data = _finalize_array(
tile_buffer,
bitpix=hdu._header["ZBITPIX"],
tile_shape=actual_tile_shape,
algorithm="GZIP_1",
lossless=True,
)
else:
if hdu._header["ZCMPTYPE"] == "GZIP_2":
# Decompress with GZIP_1 just to find the total number of
# elements in the uncompressed data. We just need to do this once
# as this will be the same for all tiles.
if override_itemsize is None:
tile_data = np.asarray(_decompress_tile(cdata, algorithm="GZIP_1"))
override_itemsize = tile_data.size // int(
np.product(actual_tile_shape)
)
settings["itemsize"] = override_itemsize
tile_buffer = _decompress_tile(
cdata, algorithm=hdu._header["ZCMPTYPE"], **settings
)
tile_data = _finalize_array(
tile_buffer,
bitpix=hdu._header["ZBITPIX"],
tile_shape=actual_tile_shape,
algorithm=hdu._header["ZCMPTYPE"],
lossless=not quantized,
)
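        # ZBLANK (a per-tile column value if present, else a header keyword)
        # gives the integer value that flags undefined pixels; record their
        # positions now so they can be restored as NaN further below.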
if "ZBLANK" in row.array.names:
zblank = row["ZBLANK"]
else:
zblank = hdu._header.get("ZBLANK", None)
if zblank is not None:
blank_mask = tile_data == zblank
if quantized:
dither_method = DITHER_METHODS[hdu._header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = hdu._header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=None,
bitpix=hdu._header["ZBITPIX"],
)
tile_data = np.asarray(
q.decode_quantized(tile_data, row["ZSCALE"], row["ZZERO"])
).reshape(actual_tile_shape)
if zblank is not None:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
tile_data[blank_mask] = np.nan
data[tile_slices] = tile_data
return data
def compress_hdu(hdu):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
hdu : `astropy.io.fits.CompImageHDU`
Input HDU to compress the data for.
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(hdu.data, np.ndarray):
raise TypeError("CompImageHDU.data must be a numpy.ndarray")
_check_compressed_header(hdu._header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(hdu._header)
data_shape = _data_shape(hdu._header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(hdu._header, "noisebit", 0)
for irow, tile_slices in enumerate(_iter_array_tiles(data_shape, tile_shape)):
data = hdu.data[tile_slices]
settings = _header_to_settings(hdu._header, data.shape)
quantize = "ZSCALE" in hdu.columns.dtype.names
if data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[hdu._header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = hdu._header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=hdu._header["ZBITPIX"],
)
original_shape = data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
data = data.copy()
if np.all(nan_mask):
data[nan_mask] = 0
else:
data[nan_mask] = np.nanmin(data)
try:
data, scale, zero = q.encode_quantized(data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
data = np.asarray(data).reshape(original_shape)
if any_nan:
if not data.flags.writeable:
data = data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
# The original compress_hdu assumed the data was in native endian, so we
# change this here:
if hdu._header["ZCMPTYPE"].startswith("GZIP") or gzip_fallback[-1]:
# This is apparently needed so that our heap data agrees with
# the C implementation!?
data = data.astype(data.dtype.newbyteorder(">"))
else:
if not data.dtype.isnative:
data = data.astype(data.dtype.newbyteorder("="))
if gzip_fallback[-1]:
cbytes = _compress_tile(data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(data, algorithm=hdu._header["ZCMPTYPE"], **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
hdu._header["ZBLANK"] = zblank
table = np.zeros(len(compressed_bytes), dtype=hdu.columns.dtype.newbyteorder(">"))
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
    # the real size - it is not clear whether this is deliberate or a bug somewhere.
if hdu._header["ZCMPTYPE"] == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if hdu._header["ZCMPTYPE"] == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2").tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
if len(table_bytes) != hdu._theap:
raise Exception(
f"Unexpected compressed table size (expected {hdu._theap}, got {len(table_bytes)})"
)
heap = table.tobytes() + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8)
|
f735675152455814b189c7a038a9d6d5643b7e5d58b8b14dd51e5f15da678808 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from astropy.io.fits.column import (
ATTRIBUTE_TO_KEYWORD,
FITS2NUMPY,
KEYWORD_NAMES,
KEYWORD_TO_ATTRIBUTE,
TDEF_RE,
ColDefs,
Column,
_AsciiColDefs,
_cmp_recformats,
_convert_format,
_FormatP,
_FormatQ,
_makep,
_parse_tformat,
_scalar_to_format,
)
from astropy.io.fits.fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from astropy.io.fits.header import Header, _pad_length
from astropy.io.fits.util import _is_int, _str_to_num, path_like
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from .base import DELAYED, ExtensionHDU, _ValidHDU
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = " "
lineterminator = "\n"
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(
cls,
columns,
header=None,
nrows=0,
fill=False,
character_as_bytes=False,
**kwargs,
):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
        `numpy.ndarray` or `numpy.recarray` object), return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
            If these columns have data arrays attached, that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object to instantiate the new HDU with. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If `False`,
copy the data from input, undefined cells will still be filled with
zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(
coldefs, nrows=nrows, fill=fill, character_as_bytes=character_as_bytes
)
hdu = cls(
data=data, header=header, character_as_bytes=character_as_bytes, **kwargs
)
coldefs._add_listener(hdu)
return hdu
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (
any(type(r) in (_FormatP, _FormatQ) for r in columns._recformats)
and self._data_size is not None
and self._data_size > self._theap
):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8, self._data_offset)
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data = raw_data[:tbsize].view(dtype=columns.dtype, type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype, self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case just
# return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
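        # FITS table data on disk is big-endian, so view the record array with
        # a big-endian dtype.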
data.dtype = data.dtype.newbyteorder(">")
# hack to enable pseudo-uint support
data._uint = self._uint
# pass datLoc, for P format
data._heapoffset = self._theap
data._heapsize = self._header["PCOUNT"]
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
super().__init__(data=data, header=header, name=name, ver=ver)
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError("No header to setup HDU.")
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
("XTENSION", self._extension, self._ext_comment),
("BITPIX", 8, "array data type"),
("NAXIS", 2, "number of array dimensions"),
("NAXIS1", 0, "length of dimension 1"),
("NAXIS2", 0, "length of dimension 2"),
("PCOUNT", 0, "number of group parameters"),
("GCOUNT", 1, "number of groups"),
("TFIELDS", 0, "number of table fields"),
]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ", ".join(x + "n" for x in sorted(future_ignore))
warnings.warn(
"The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys),
AstropyDeprecationWarning,
)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header["NAXIS1"] = self.data._raw_itemsize
self._header["NAXIS2"] = self.data.shape[0]
self._header["TFIELDS"] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, "_coldefs"):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if "data" in self.__dict__:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ",".join(self.columns._recformats)
data = np.rec.array(
None, formats=formats, names=self.columns.names, shape=0
)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
if "data" in self.__dict__:
self.columns._remove_listener(self.__dict__["data"])
self.__dict__["data"] = data
self.columns = self.data.columns
self.columns._add_listener(self.data)
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError("Table data has incorrect type.")
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get("NAXIS2", 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
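        # THEAP is the byte offset of the heap from the start of the data
        # unit; when absent it defaults to the size of the main table.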
size = self._header["NAXIS1"] * self._header["NAXIS2"]
return self._header.get("THEAP", size)
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set("NAXIS1", self.data._raw_itemsize, after="NAXIS")
self._header.set("NAXIS2", self.data.shape[0], after="NAXIS1")
self._header.set("TFIELDS", len(self.columns), after="GCOUNT")
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(), header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header["TFIELDS"] = len(self.data._coldefs)
self._header["NAXIS2"] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
heapstart = self._header.get("THEAP", tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header["PCOUNT"] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat, max=_max)
self._header["TFORM" + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option="warn"):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
if len(self._header) > 1:
if not (
isinstance(self._header[0], str)
and self._header[0].rstrip() == self._extension
):
err_text = "The XTENSION keyword must match the HDU type."
fix_text = f"Converted the XTENSION keyword to {self._extension}."
def fix(header=self._header):
header[0] = (self._extension, self._ext_comment)
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
self.req_cards("NAXIS", None, lambda v: (v == 2), 2, option, errs)
self.req_cards("BITPIX", None, lambda v: (v == 8), 8, option, errs)
self.req_cards(
"TFIELDS",
7,
lambda v: (_is_int(v) and v >= 0 and v <= 999),
0,
option,
errs,
)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TFORM" + str(idx + 1), None, None, None, option, errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
nrows = self._header["NAXIS2"]
ncols = self._header["TFIELDS"]
format = ", ".join(
[self._header["TFORM" + str(j + 1)] for j in range(ncols)]
)
format = f"[{format}]"
dims = f"{nrows}R x {ncols}C"
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(
self, column, col_idx, attr, old_value, new_value
):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(
before_keyword, (keyword, new_value), after=True
)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1 :]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword, (keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
        Otherwise, clear keywords for all columns.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group("label")
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
}:
continue
num = int(match.group("num")) - 1 # convert to zero-base
table_keywords.append((idx, match.group(0), base_keyword, num))
# First delete
rev_sorted_idx_0 = sorted(
table_keywords, key=operator.itemgetter(0), reverse=True
)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value, old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if "TFIELDS" in self._header:
self._header["TFIELDS"] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "TABLE"
_ext_comment = "ASCII table extension"
_padding_byte = " "
_columns_type = _AsciiColDefs
__format_RE = re.compile(r"(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?")
def __init__(
self, data=None, header=None, name=None, ver=None, character_as_bytes=False
):
super().__init__(
data, header, name=name, ver=ver, character_as_bytes=character_as_bytes
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
names = [n for idx, n in enumerate(columns.names)]
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError(f"Duplicate field names: {dup}")
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
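        # Row width in characters: the (1-based) start of the last column plus
        # its span.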
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = "S" + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header["NAXIS1"] > itemsize:
data_type = "S" + str(
columns.spans[idx] + self._header["NAXIS1"] - itemsize
)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b" ", dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option="warn"):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards("PCOUNT", None, lambda v: (v == 0), 0, option, errs)
tfields = self._header["TFIELDS"]
for idx in range(tfields):
self.req_cards("TBCOL" + str(idx + 1), None, _is_int, None, option, errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = "BINTABLE"
_ext_comment = "binary table extension"
def __init__(
self,
data=None,
header=None,
name=None,
uint=False,
ver=None,
character_as_bytes=False,
):
from astropy.table import Table
if isinstance(data, Table):
from astropy.io.fits.convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(
data,
header,
name=name,
uint=uint,
ver=ver,
character_as_bytes=character_as_bytes,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension in (cls._extension, "A3DTABLE")
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data.
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
            # contribute to the checksum)
if data._get_raw_data() is None:
# This block is still needed because
# test_variable_length_table_data leads to ._get_raw_data
                # returning None, which means _get_heap_data doesn't work.
                # This happens when the data is loaded in memory rather than
                # left unloaded on disk.
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
else:
csum = self._compute_checksum(data._get_heap_data(), csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
            # write out the heap of variable length array columns; this has
# to be done after the "regular" data is written (above)
# to avoid a bug in the lustre filesystem client, don't
# write 0-byte objects
if data._gap > 0:
fileobj.write((data._gap * "\0").encode("ascii"))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
# that the data pointers appear in the column (regardless
# if that data pointer has a different, previous heap
# offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx], _FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx) for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
        # `for row in self.data:`) so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == "U":
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i + 1 :])
item = np.char.encode(item, "ascii")
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j + 1 :])
# Fix padding problem (see #5296).
padding = "\x00" * (field_width - item_length)
fileobj.write(padding.encode("ascii"))
_tdump_file_format = textwrap.dedent(
"""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
followed by a blank. Floating point data is output right justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
This format does *not* support variable length arrays using the
'Q' format due to ambiguities that are difficult to overcome. What
this means is that this file format cannot support VLA columns in
tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
eight 16-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
"""
)
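# Rough illustration of the datafile layout described above (field widths
# are approximate and all values are hypothetical): one row of a table with
# an integer column, a float column, and a length-2 VLA column would be
# written roughly as
#
#     <21-char int> <21-char float> VLA_Length= <21-char length> <elem> <elem>
#
# with the trailing blank of the last field replaced by a newline.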
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : path-like or file-like, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : path-like or file-like, optional
Output column definitions file. The default is `None`, in which
case no column definitions output is produced.
hfile : path-like or file-like, optional
Output header parameters file. The default is `None`, in which
case no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
"""
if isinstance(datafile, path_like):
datafile = os.path.expanduser(datafile)
if isinstance(cdfile, path_like):
cdfile = os.path.expanduser(cdfile)
if isinstance(hfile, path_like):
hfile = os.path.expanduser(hfile)
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, path_like):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(
" ".join([f"File '{f}' already exists." for f in exist])
+ " If you mean to replace the file(s) then use the argument "
"'overwrite=True'."
)
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep="\n", endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace("\n", "\n ")
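# Illustrative usage sketch for ``dump`` (the FITS file and output file
# names below are hypothetical, not part of this module):
#
#     >>> from astropy.io import fits
#     >>> with fits.open("events.fits") as hdul:
#     ...     hdul[1].dump(datafile="events_data.txt",
#     ...                  cdfile="events_cols.txt",
#     ...                  hfile="events_hdr.txt",
#     ...                  overwrite=True)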
def load(cls, datafile, cdfile=None, hfile=None, replace=False, header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent, the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : path-like or file-like
Input data file containing the table data in ASCII format.
cdfile : path-like or file-like, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : path-like or file-like, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this object's header.
replace : bool, optional
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : `~astropy.io.fits.Header`, optional
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
supersedes the keywords from hfile, which is only used to update
values not present in this Header, unless ``replace=True``, in which
case this Header's values are completely replaced with the values from
hfile.
Notes
-----
The primary use for the `load` method is to allow the input of ASCII
data that was edited in a standard text editor of the table data and
parameters. The `dump` method can be used to create the initial ASCII
files.
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(
Header.fromtextfile(hfile), update=True, update_first=True
)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace("\n", "\n ")
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
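# Illustrative round-trip sketch for ``load`` (file names are hypothetical
# and assumed to have been produced by ``dump`` above):
#
#     >>> from astropy.io.fits import BinTableHDU
#     >>> hdu = BinTableHDU.load("events_data.txt",
#     ...                        cdfile="events_cols.txt",
#     ...                        hfile="events_hdr.txt")
#     >>> hdu.writeto("events_roundtrip.fits", overwrite=True)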
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + ".txt"
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == "S":
itemsize = int(format[1:])
return "{:{size}}".format(val, size=itemsize)
elif format in np.typecodes["AllInteger"]:
# output integer
return f"{val:21d}"
elif format in np.typecodes["Complex"]:
return f"{val.real:21.15g}+{val.imag:.15g}j"
elif format in np.typecodes["Float"]:
# output floating point
return f"{val:#21.15g}"
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append("VLA_Length=")
line.append(f"{len(row[column.name]):21d}")
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == "V":
array_format = dtype.base.char
if array_format == "S":
array_format += str(dtype.itemsize)
if dtype.char == "V":
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name], array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "w")
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ["disp", "unit", "dim", "null", "bscale", "bzero"]
line += [
"{!s:16s}".format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)
]
fileobj.write(" ".join(line))
fileobj.write("\n")
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == "VLA_Length=":
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(
np.recarray(shape=1, dtype=dtype), nrows=nrows, fill=True
)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)) :]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY["L"]:
return bool(int(val))
elif recformats[col] == FITS2NUMPY["M"]:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == "VLA_Length=":
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx : idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [
format_value(col, val) for val in line[slice_]
]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, path_like):
fileobj = os.path.expanduser(fileobj)
fileobj = open(fileobj)
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ["name", "format", "disp", "unit", "dim"]:
kwargs[key] = words.pop(0).replace('""', "")
for key in ["null", "bscale", "bzero"]:
word = words.pop(0).replace('""', "")
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (
not isinstance(c, chararray.chararray)
and c.itemsize > 1
and c.dtype.str[0] in swap_types
):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({"names": names, "formats": formats, "offsets": offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
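# Minimal usage sketch for the context manager above, mirroring how
# _writedata_internal uses it (``hdu`` here is a hypothetical BinTableHDU):
#
#     >>> with _binary_table_byte_swap(hdu.data) as data:
#     ...     pass  # within this block ``data`` is big-endian;
#     ...           # the original byte order is restored on exit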
|
efc532120cd5fee844c842e4548f152ea13b9bb3245510cf3a782b68142d80b0 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import gc
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits._tiled_compression import compress_hdu, decompress_hdu
from astropy.io.fits.card import Card
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.column import TDEF_RE, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_get_array_mmap,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU
from .image import ImageHDU
from .table import BinTableHDU
# This global variable is used, e.g., when calling fits.open with
# disable_image_compression=True, which temporarily changes the global
# variable to False. This should ideally be refactored to avoid relying on
# global module variables.
COMPRESSION_ENABLED = True
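# Hedged illustration of how the flag above gets toggled in practice
# (``some_file.fits`` is a hypothetical file name):
#
#     >>> from astropy.io import fits
#     >>> hdul = fits.open("some_file.fits", disable_image_compression=True)
#
# in which case compressed image HDUs are presented as plain binary tables.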
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: "NO_DITHER",
SUBTRACTIVE_DITHER_1: "SUBTRACTIVE_DITHER_1",
SUBTRACTIVE_DITHER_2: "SUBTRACTIVE_DITHER_2",
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = ("RICE_1", "GZIP_1", "GZIP_2", "PLIO_1", "HCOMPRESS_1")
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = "RICE_1"
DEFAULT_QUANTIZE_LEVEL = 16.0
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {"RICE_ONE": "RICE_1"}
COMPRESSION_KEYWORDS = {
"ZIMAGE",
"ZCMPTYPE",
"ZBITPIX",
"ZNAXIS",
"ZMASKCMP",
"ZSIMPLE",
"ZTENSION",
"ZEXTEND",
}
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
"SIMPLE": "ZSIMPLE",
"XTENSION": "ZTENSION",
"BITPIX": "ZBITPIX",
"NAXIS": "ZNAXIS",
"EXTEND": "ZEXTEND",
"BLOCKED": "ZBLOCKED",
"PCOUNT": "ZPCOUNT",
"GCOUNT": "ZGCOUNT",
"CHECKSUM": "ZHECKSUM",
"DATASUM": "ZDATASUM",
}
_zdef_re = re.compile(r"(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?")
_compression_keywords = set(_keyword_remaps.values()).union(
["ZIMAGE", "ZCMPTYPE", "ZMASKCMP", "ZQUANTIZ", "ZDITHER0"]
)
_indexed_compression_keywords = {"ZNAXIS", "ZTILE", "ZNAME", "ZVAL"}
# TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
# We need to override any Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
# We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith(
"HIERARCH "
):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.append may have already deleted a blank card in the table
# header, thanks to inheritance: Header.append calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__deltitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False, bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping, in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super().insert(key, card, useblanks=useblanks, after=after)
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.insert may have already deleted a blank card in the table
# header, thanks to inheritance: Header.insert calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.insert(remapped_index, card, useblanks=False, after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super()._update(card)
if keyword in Card._commentary_keywords:
# Otherwise this will result in a duplicate insertion
return
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super()._relativeinsert(card, before=before, after=after, replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(
card, before=remapped_before, after=remapped_after, replace=replace
)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = (
"Keyword {!r} is reserved for use by the FITS Tiled Image "
"Convention and will not be stored in the header for the "
"image being compressed.".format(keyword)
)
if keyword == "TFIELDS":
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group("label").upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group("label").upper()
num = m.group("num")
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
is_naxisn = False
if keyword[:5] == "NAXIS":
with suppress(ValueError):
index = int(keyword[5:])
is_naxisn = index > 0
if is_naxisn:
return f"ZNAXIS{index}"
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
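# Illustration of the remapping implemented above (the results follow
# directly from _keyword_remaps and the NAXISn handling):
#
#     >>> CompImageHeader._remap_keyword("BITPIX")
#     'ZBITPIX'
#     >>> CompImageHeader._remap_keyword("NAXIS2")
#     'ZNAXIS2'
#     >>> CompImageHeader._remap_keyword("OBJECT")  # unmapped keywords pass through
#     'OBJECT'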
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
with suppress(IndexError, KeyError):
idx = self._table_header._cardindex((remapped_insert_keyword, repeat))
return idx
def clear(self):
"""
Remove all cards from the header.
"""
self._table_header.clear()
super().clear()
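# Hedged behavioral sketch of the synchronization described in the class
# docstring (``chdu`` is a hypothetical CompImageHDU instance):
#
#     >>> chdu.header["BUNIT"] = "adu"   # set on the image header
#     >>> chdu._header["BUNIT"]          # mirrored into the table header
#     'adu'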
# TODO: Fix this class so that it doesn't actually inherit from BinTableHDU,
# but instead has an internal BinTableHDU reference
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
_manages_own_heap = True
"""
The calls to CFITSIO lay out the heap data in memory, and we write it out
the same way CFITSIO organizes it. In principle this would break if a user
manually changes the underlying compressed data by hand, but there is no
reason they would want to do that (and if they do that's their
responsibility).
"""
_default_name = "COMPRESSED_IMAGE"
def __init__(
self,
data=None,
header=None,
name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_size=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False,
scale_back=False,
**kwargs,
):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : `~astropy.io.fits.Header`, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``
tile_size : sequence of int, optional
Compression tile sizes as a list, one element for each dimension of
the image. The default treats each row of the image as a tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
range 1 to 1000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The astropy.io.fits package supports 2 methods of image compression:
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
Astropy first uncompresses the entire file into a temporary file
before performing the requested read operations. The
astropy.io.fits package does not support writing to these types
of compressed files. This type of compression is supported in
the ``_File`` class, not in the `CompImageHDU` class. The file
compression type is recognized by the ``.gz`` or ``.zip`` file
name extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
variable length array column in a FITS binary table. The
astropy.io.fits package recognizes that this binary table extension
contains an image and treats it as if it were an image
extension. Under this tile-compression format, FITS header
keywords remain uncompressed. At this time, Astropy does not
support the ability to extract and uncompress sections of the
image without having to uncompress the entire image.
The astropy.io.fits package supports 3 general-purpose compression
algorithms plus one other special-purpose compression technique that is
designed for data masks with positive integer pixel values. The 3
general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_size`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_size`` value of ``[100,100]`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
large ``hcomp_scale`` values, however, this can produce undesirable
'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point pixel values are not exactly preserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real information in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
slightly for each tile. In some cases, it may be desirable to specify
the exact quantization level to be used, instead of specifying it
relative to the calculated noise value. This may be done by specifying
the negative of desired quantization level for the value of
``quantize_level``. In the previous example, one could specify
``quantize_level = -2.0`` so that the quantized integer levels differ
by 2.0. Larger negative values for ``quantize_level`` means that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
regions. The first method, specified with the constant
``SUBTRACTIVE_DITHER_1``, adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
"""
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
if data is DELAYED:
# Reading the HDU from a file
super().__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super().__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(
header,
name,
compression_type=compression_type,
tile_size=tile_size,
hcomp_scale=hcomp_scale,
hcomp_smooth=hcomp_smooth,
quantize_level=quantize_level,
quantize_method=quantize_method,
dither_seed=dither_seed,
)
# TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
self._axes = [
self._header.get("ZNAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("ZNAXIS", 0))
]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._bitpix = self._header["ZBITPIX"]
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
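# Illustrative construction sketch tying the parameters above together
# (the array and output file name are hypothetical):
#
#     >>> import numpy as np
#     >>> from astropy.io import fits
#     >>> img = np.random.normal(size=(300, 300)).astype(np.float32)
#     >>> chdu = fits.CompImageHDU(img, compression_type="RICE_1",
#     ...                          quantize_level=16.0)
#     >>> chdu.writeto("compressed.fits", overwrite=True)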
def _remove_unnecessary_default_extnames(self, header):
"""Remove default EXTNAME values if they are unnecessary.
Some data files (eg from CFHT) can have the default EXTNAME and
an explicit value. This method removes the default if a more
specific header exists. It also removes any duplicate default
values.
"""
if "EXTNAME" in header:
indices = header._keyword_indices["EXTNAME"]
# Only continue if there is more than one found
n_extname = len(indices)
if n_extname > 1:
extnames_to_remove = [
index for index in indices if header[index] == self._default_name
]
if len(extnames_to_remove) == n_extname:
# Keep the first (they are all the same)
extnames_to_remove.pop(0)
# Remove them all in reverse order to keep the index unchanged.
for index in reversed(sorted(extnames_to_remove)):
del header[index]
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
# Similar to base class but uses .header rather than ._header
return str(self.header.get("EXTNAME", self._default_name))
@name.setter
def name(self, value):
# This is a copy of the base class but using .header instead
# of ._header to ensure that the name stays in sync.
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if "EXTNAME" in self.header:
self.header["EXTNAME"] = value
else:
self.header["EXTNAME"] = (value, "extension name")
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != "XTENSION":
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
if xtension not in ("BINTABLE", "A3DTABLE"):
return False
if "ZIMAGE" not in header or not header["ZIMAGE"]:
return False
return COMPRESSION_ENABLED
def _update_header_data(
self,
image_header,
name=None,
compression_type=None,
tile_size=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None,
):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : `~astropy.io.fits.Header`
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2',
'HCOMPRESS_1'; if this value is `None`, use value already in the
header; if no value already in the header, use 'RICE_1'
tile_size : sequence of int, optional
compression tile sizes as a list; if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
NO_DITHER (-1; default), SUBTRACTIVE_DITHER_1 (1), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
"""
# Clean up EXTNAME duplicates
self._remove_unnecessary_default_extnames(self._header)
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
# 4 GB. This is not a perfect heuristic, as one can contrive an input
# array which, when compressed, the entire binary table representing
# the compressed data is larger than 4GB. That said, this is the same
# heuristic used by CFITSIO, so this should give consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
if self._has_data:
huge_hdu = self.data.nbytes > 2**32
else:
huge_hdu = False
# Update the extension name in the table header
if not name and "EXTNAME" not in self._header:
# Do not sync this with the image header since the default
# name is specific to the table header.
self._header.set(
"EXTNAME",
self._default_name,
"name of this binary table extension",
after="TFIELDS",
)
elif name:
# Force the name into table and image headers.
self.name = name
# Set the compression type in the table header.
if compression_type:
if compression_type not in COMPRESSION_TYPES:
warnings.warn(
"Unknown compression type provided (supported are {}). "
"Default ({}) compression will be used.".format(
", ".join(map(repr, COMPRESSION_TYPES)),
DEFAULT_COMPRESSION_TYPE,
),
AstropyUserWarning,
)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set(
"ZCMPTYPE", compression_type, "compression algorithm", after="TFIELDS"
)
else:
compression_type = self._header.get("ZCMPTYPE", DEFAULT_COMPRESSION_TYPE)
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get("BZERO", 0.0)
bscale = image_header.get("BSCALE", 1.0)
after_keyword = "EXTNAME"
if bscale != 1.0:
self._header.set("BSCALE", bscale, after=after_keyword)
after_keyword = "BSCALE"
if bzero != 0.0:
self._header.set("BZERO", bzero, after=after_keyword)
try:
bitpix_comment = image_header.comments["BITPIX"]
except (AttributeError, KeyError):
bitpix_comment = "data type of original image"
try:
naxis_comment = image_header.comments["NAXIS"]
except (AttributeError, KeyError):
naxis_comment = "dimension of original image"
# Set the label for the first column in the table
self._header.set(
"TTYPE1", "COMPRESSED_DATA", "label for field 1", after="TFIELDS"
)
# Set the data format for the first column. It is dependent
# on the requested compression type.
if compression_type == "PLIO_1":
tform1 = "1QI" if huge_hdu else "1PI"
else:
tform1 = "1QB" if huge_hdu else "1PB"
self._header.set(
"TFORM1",
tform1,
"data format of field: variable length array",
after="TTYPE1",
)
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header["TTYPE1"], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header["BITPIX"]
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
# CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA
# column to store floating point data that couldn't be quantized,
# instead of the UNCOMPRESSED_DATA column. There's no way to control
# this behavior so the only way to determine which behavior will
# be employed is via the CFITSIO version
ttype2 = "GZIP_COMPRESSED_DATA"
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = "1QB" if huge_hdu else "1PB"
# Set up the second column for the table that will hold any
# uncompressable data.
self._header.set("TTYPE2", ttype2, "label for field 2", after="TFORM1")
self._header.set(
"TFORM2",
tform2,
"data format of field: variable length array",
after="TTYPE2",
)
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set("TTYPE3", "ZSCALE", "label for field 3", after="TFORM2")
self._header.set(
"TFORM3", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE3"
)
col3 = Column(name=self._header["TTYPE3"], format=self._header["TFORM3"])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set("TTYPE4", "ZZERO", "label for field 4", after="TFORM3")
self._header.set(
"TFORM4", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE4"
)
after = "TFORM4"
col4 = Column(name=self._header["TTYPE4"], format=self._header["TFORM4"])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = "TFORM1"
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ["TTYPE2", "TFORM2", "TTYPE3", "TFORM3", "TTYPE4", "TFORM4"]
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set("NAXIS1", cols.dtype.itemsize, "width of table in bytes")
self._header.set(
"TFIELDS", ncols, "number of fields in each row", after="GCOUNT"
)
self._header.set(
"ZIMAGE", True, "extension contains compressed image", after=after
)
self._header.set("ZBITPIX", zbitpix, bitpix_comment, after="ZIMAGE")
self._header.set(
"ZNAXIS", self._image_header["NAXIS"], naxis_comment, after="ZBITPIX"
)
# Strip the table header of all the ZNAXISn and ZTILEn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
try:
del self._header["ZNAXIS" + str(idx)]
del self._header["ZTILE" + str(idx)]
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
naxis = self._image_header["NAXIS"]
if not tile_size:
tile_size = []
elif len(tile_size) != naxis:
warnings.warn(
"Provided tile size not appropriate for the data. "
"Default tile size will be used.",
AstropyUserWarning,
)
tile_size = []
# Set default tile dimensions for HCOMPRESS_1
if compression_type == "HCOMPRESS_1":
if self._image_header["NAXIS1"] < 4 or self._image_header["NAXIS2"] < 4:
raise ValueError("Hcompress minimum image dimension is 4 pixels")
elif tile_size:
if tile_size[0] < 4 or tile_size[1] < 4:
# user specified tile size is too small
raise ValueError("Hcompress minimum tile dimension is 4 pixels")
major_dims = len([ts for ts in tile_size if ts > 1])
if major_dims > 2:
raise ValueError(
"HCOMPRESS can only support 2-dimensional tile sizes. "
"All but two of the tile_size dimensions must be set "
"to 1."
)
if tile_size and (tile_size[0] == 0 and tile_size[1] == 0):
# compress the whole image as a single tile
tile_size[0] = self._image_header["NAXIS1"]
tile_size[1] = self._image_header["NAXIS2"]
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size[i] = 1
elif not tile_size:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
# efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
# 1st tile dimension is the row length of the image
tile_size.append(self._image_header["NAXIS1"])
if self._image_header["NAXIS2"] <= 30:
tile_size.append(self._image_header["NAXIS2"])  # whole image as one tile
else:
# look for another good tile dimension
naxis2 = self._image_header["NAXIS2"]
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_size.append(dim)
break
else:
tile_size.append(17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size.append(1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = self._image_header["NAXIS1"] % tile_size[0] # 1st dimen
if remain > 0 and remain < 4:
tile_size[0] += 1 # try increasing tile size by 1
remain = self._image_header["NAXIS1"] % tile_size[0]
if remain > 0 and remain < 4:
raise ValueError(
"Last tile along 1st dimension has less than 4 pixels"
)
remain = self._image_header["NAXIS2"] % tile_size[1] # 2nd dimen
if remain > 0 and remain < 4:
tile_size[1] += 1 # try increasing tile size by 1
remain = self._image_header["NAXIS2"] % tile_size[1]
if remain > 0 and remain < 4:
raise ValueError(
"Last tile along 2nd dimension has less than 4 pixels"
)
# Set up locations for writing the next cards in the header.
last_znaxis = "ZNAXIS"
if self._image_header["NAXIS"] > 0:
after1 = "ZNAXIS1"
else:
after1 = "ZNAXIS"
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
nrows = 0
for idx, axis in enumerate(self._axes):
naxis = "NAXIS" + str(idx + 1)
znaxis = "ZNAXIS" + str(idx + 1)
ztile = "ZTILE" + str(idx + 1)
if tile_size and len(tile_size) >= idx + 1:
ts = tile_size[idx]
else:
if ztile not in self._header:
# Default tile size
if not idx:
ts = self._image_header["NAXIS1"]
else:
ts = 1
else:
ts = self._header[ztile]
tile_size.append(ts)
if not nrows:
nrows = (axis - 1) // ts + 1
else:
nrows *= (axis - 1) // ts + 1
if image_header and naxis in image_header:
self._header.set(
znaxis, axis, image_header.comments[naxis], after=last_znaxis
)
else:
self._header.set(
znaxis, axis, "length of original image axis", after=last_znaxis
)
self._header.set(ztile, ts, "size of tiles to be compressed", after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set("NAXIS2", nrows, "number of rows in table")
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
if self._header[zname] == "NOISEBIT":
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == "SCALE ":
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == "SMOOTH ":
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
del self._header[zname]
del self._header[zval]
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = "ZCMPTYPE"
idx = 1
if compression_type == "RICE_1":
self._header.set(
"ZNAME1", "BLOCKSIZE", "compression block size", after=after_keyword
)
self._header.set(
"ZVAL1", DEFAULT_BLOCK_SIZE, "pixels per block", after="ZNAME1"
)
self._header.set(
"ZNAME2", "BYTEPIX", "bytes per pixel (1, 2, 4, or 8)", after="ZVAL1"
)
if self._header["ZBITPIX"] == 8:
bytepix = 1
elif self._header["ZBITPIX"] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set(
"ZVAL2", bytepix, "bytes per pixel (1, 2, 4, or 8)", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
elif compression_type == "HCOMPRESS_1":
self._header.set(
"ZNAME1", "SCALE", "HCOMPRESS scale factor", after=after_keyword
)
self._header.set(
"ZVAL1", hcomp_scale, "HCOMPRESS scale factor", after="ZNAME1"
)
self._header.set(
"ZNAME2", "SMOOTH", "HCOMPRESS smooth option", after="ZVAL1"
)
self._header.set(
"ZVAL2", hcomp_smooth, "HCOMPRESS smooth option", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
if self._image_header["BITPIX"] < 0: # floating point image
self._header.set(
"ZNAME" + str(idx),
"NOISEBIT",
"floating point quantization level",
after=after_keyword,
)
self._header.set(
"ZVAL" + str(idx),
quantize_level,
"floating point quantization level",
after="ZNAME" + str(idx),
)
# Add the dither method and seed
if quantize_method:
if quantize_method not in [
NO_DITHER,
SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2,
]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn(
"Unknown quantization method provided. "
"Default method ({}) used.".format(name)
)
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = "No dithering during quantization"
else:
zquantiz_comment = "Pixel Quantization Algorithm"
self._header.set(
"ZQUANTIZ",
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after="ZVAL" + str(idx),
)
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get("ZQUANTIZ", NO_DITHER)
if isinstance(quantize_method, str):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if "ZDITHER0" in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header["ZDITHER0"]
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif "ZDITHER0" in self._header:
dither_seed = self._header["ZDITHER0"]
else:
dither_seed = self._generate_dither_seed(DEFAULT_DITHER_SEED)
self._header.set(
"ZDITHER0",
dither_seed,
"dithering offset when quantizing floats",
after="ZQUANTIZ",
)
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if "SIMPLE" in image_header:
self._header.set(
"ZSIMPLE",
image_header["SIMPLE"],
image_header.comments["SIMPLE"],
before="ZBITPIX",
)
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if "EXTEND" in image_header:
self._header.set(
"ZEXTEND", image_header["EXTEND"], image_header.comments["EXTEND"]
)
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if "BLOCKED" in image_header:
self._header.set(
"ZBLOCKED",
image_header["BLOCKED"],
image_header.comments["BLOCKED"],
)
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in image_header:
self._header.set(
"ZTENSION",
"IMAGE",
image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in image_header:
self._header.set(
"ZPCOUNT",
image_header["PCOUNT"],
image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in image_header:
self._header.set(
"ZGCOUNT",
image_header["GCOUNT"],
image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# Move CHECKSUM and DATASUM cards from the image header to the
            # table header as ZHECKSUM and ZDATASUM cards.
if "CHECKSUM" in image_header:
self._header.set(
"ZHECKSUM",
image_header["CHECKSUM"],
image_header.comments["CHECKSUM"],
)
if "DATASUM" in image_header:
self._header.set(
"ZDATASUM",
image_header["DATASUM"],
image_header.comments["DATASUM"],
)
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in self._image_header:
self._header.set(
"ZTENSION",
"IMAGE",
self._image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in self._image_header:
self._header.set(
"ZPCOUNT",
self._image_header["PCOUNT"],
self._image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in self._image_header:
self._header.set(
"ZGCOUNT",
self._image_header["GCOUNT"],
self._image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if "ZHECKSUM" in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
@lazyproperty
def data(self):
# The data attribute is the image data (not the table data).
data = decompress_hdu(self)
if data is None:
return data
# Scale the data if necessary
if self._orig_bzero != 0 or self._orig_bscale != 1:
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
if "BLANK" in self._header:
blanks = data == np.array(self._header["BLANK"], dtype="int32")
else:
blanks = None
if self._bscale != 1:
np.multiply(data, self._bscale, data)
if self._bzero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data += self._bzero, and we
# do this instead of self.data = self.data + self._bzero to
# avoid doubling memory usage.
np.add(data, self._bzero, out=data, casting="unsafe")
if blanks is not None:
data = np.where(blanks, np.nan, data)
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (
not isinstance(data, np.ndarray) or data.dtype.fields is not None
):
raise TypeError(
"CompImageHDU data has incorrect type:{}; dtype.fields = {}".format(
type(data), data.dtype.fields
)
)
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super().data
if isinstance(compressed_data, np.rec.recarray):
# Make sure not to use 'del self.data' so we don't accidentally
# go through the self.data.fdel and close the mmap underlying
# the compressed_data array
del self.__dict__["data"]
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@compressed_data.deleter
def compressed_data(self):
# Deleting the compressed_data attribute has to be handled
# with a little care to prevent a reference leak
# First delete the ._coldefs attributes under it to break a possible
# reference cycle
if "compressed_data" in self.__dict__:
del self.__dict__["compressed_data"]._coldefs
# Now go ahead and delete from self.__dict__; normally
        # lazyproperty.__delete__ does this for us, but we can preempt it to
# do some additional cleanup
del self.__dict__["compressed_data"]
# If this file was mmap'd, numpy.memmap will hold open a file
# handle until the underlying mmap object is garbage-collected;
# since this reference leak can sometimes hang around longer than
# welcome go ahead and force a garbage collection
gc.collect()
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
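        # self._axes stores the image axis lengths in FITS axis order
        # (NAXIS1 first), so the numpy shape is the reverse of that list.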
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
# has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, "_image_header"):
return self._image_header
# Clean up any possible doubled EXTNAME keywords that use
# the default. Do this on the original header to ensure
# duplicates are removed cleanly.
self._remove_unnecessary_default_extnames(self._header)
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
        # Note: a set is used here instead of a list in case there are any
        # duplicate keywords, which there may be in some pathological cases:
        # https://github.com/astropy/astropy/issues/2750
for keyword in set(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
hcomments = self._header.comments
if "ZSIMPLE" in self._header:
image_header.set(
"SIMPLE", self._header["ZSIMPLE"], hcomments["ZSIMPLE"], before=0
)
del image_header["XTENSION"]
elif "ZTENSION" in self._header:
if self._header["ZTENSION"] != "IMAGE":
warnings.warn(
"ZTENSION keyword in compressed extension != 'IMAGE'",
AstropyUserWarning,
)
image_header.set("XTENSION", "IMAGE", hcomments["ZTENSION"], before=0)
else:
image_header.set("XTENSION", "IMAGE", before=0)
image_header.set(
"BITPIX", self._header["ZBITPIX"], hcomments["ZBITPIX"], before=1
)
image_header.set("NAXIS", self._header["ZNAXIS"], hcomments["ZNAXIS"], before=2)
last_naxis = "NAXIS"
for idx in range(image_header["NAXIS"]):
znaxis = "ZNAXIS" + str(idx + 1)
naxis = znaxis[1:]
image_header.set(
naxis, self._header[znaxis], hcomments[znaxis], after=last_naxis
)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header["NAXIS"]
for keyword in list(image_header["NAXIS?*"]):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if "ZPCOUNT" in self._header:
image_header.set(
"PCOUNT",
self._header["ZPCOUNT"],
hcomments["ZPCOUNT"],
after=last_naxis,
)
else:
image_header.set("PCOUNT", 0, after=last_naxis)
if "ZGCOUNT" in self._header:
image_header.set(
"GCOUNT", self._header["ZGCOUNT"], hcomments["ZGCOUNT"], after="PCOUNT"
)
else:
image_header.set("GCOUNT", 1, after="PCOUNT")
if "ZEXTEND" in self._header:
image_header.set("EXTEND", self._header["ZEXTEND"], hcomments["ZEXTEND"])
if "ZBLOCKED" in self._header:
image_header.set("BLOCKED", self._header["ZBLOCKED"], hcomments["ZBLOCKED"])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if "ZHECKSUM" in self._header:
image_header.set(
"CHECKSUM", self._header["ZHECKSUM"], hcomments["ZHECKSUM"]
)
if "ZDATASUM" in self._header:
image_header.set("DATASUM", self._header["ZDATASUM"], hcomments["ZDATASUM"])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if "EXTNAME" in image_header and image_header["EXTNAME"] == self._default_name:
del image_header["EXTNAME"]
# Remove the PCOUNT GCOUNT cards if the uncompressed header is
# from a primary HDU
if "SIMPLE" in image_header:
del image_header["PCOUNT"]
del image_header["GCOUNT"]
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ""
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind(".") + 1 :]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header["NAXIS"]):
_shape += (self.header["NAXIS" + str(idx + 1)],)
_format = BITPIX2DTYPE[self.header["BITPIX"]]
return (self.name, self.ver, class_name, len(self.header), _shape, _format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = DTYPE2BITPIX[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"=i{self.data.dtype.itemsize}",
)
try:
nrows = self._header["NAXIS2"]
tbsize = self._header["NAXIS1"] * nrows
self._header["PCOUNT"] = 0
if "THEAP" in self._header:
del self._header["THEAP"]
self._theap = tbsize
# First delete the original compressed data, if it exists
del self.compressed_data
# Make sure that the data is contiguous otherwise CFITSIO
# will not write the expected data
self.data = np.ascontiguousarray(self.data)
# Compress the data.
# compress_hdu returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compress_hdu(self)
finally:
self.data = old_data
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder(">")
buf = self.compressed_data
compressed_data = buf[: self._theap].view(dtype=dtype, type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
def scale(self, type=None, option="old", bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale ``self.data`` and update the keywords of
``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy dtype
            name (e.g. ``'uint8'``, ``'int16'``, ``'float32'``, etc.). If
`None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
The option will be overwritten by any user-specified bscale/bzero
values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
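        Examples
        --------
        An illustrative sketch (not run as a doctest), assuming ``hdu`` is an
        existing `CompImageHDU` whose floating point data should be stored as
        scaled 16-bit integers:
        >>> hdu.scale("int16", bzero=32768)  # doctest: +SKIP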
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale != 1 or bzero != 0:
_scale = bscale
_zero = bzero
else:
if option == "old":
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax":
                if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2.0**8 - 1)
else:
_zero = (_max + _min) / 2.0
# throw away -2^N
                        nbytes = np.dtype(_type).itemsize
                        _scale = (_max - _min) / (2.0 ** (8 * nbytes) - 2)
# Do the scaling
if _zero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data -= _zero, and we
# do this instead of self.data = self.data - _zero to
# avoid doubling memory usage.
np.subtract(self.data, _zero, out=self.data, casting="unsafe")
self.header["BZERO"] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
with suppress(KeyError):
del header["BZERO"]
if _scale != 1:
self.data /= _scale
self.header["BSCALE"] = _scale
else:
for header in (self.header, self._header):
with suppress(KeyError):
del header["BSCALE"]
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self.header.get("BZERO", 0)
self._bscale = self.header.get("BSCALE", 1)
# Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header["BITPIX"] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(BITPIX2DTYPE[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_pseudo_int_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if "CHECKSUM" in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set(
"CHECKSUM",
image_hdu.header["CHECKSUM"],
image_hdu.header.comments["CHECKSUM"],
)
if "DATASUM" in image_hdu.header:
self._image_header.set(
"DATASUM",
image_hdu.header["DATASUM"],
image_hdu.header.comments["DATASUM"],
)
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
# handles it properly
self.__dict__["data"] = self.compressed_data
return super()._prewriteto(checksum=checksum, inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super()._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, "_imagedata"):
self.__dict__["data"] = self._imagedata
del self._imagedata
else:
del self.data
def _close(self, closed=True):
super()._close(closed=closed)
# Also make sure to close access to the compressed data mmaps
if (
closed
and self._data_loaded
and _get_array_mmap(self.compressed_data) is not None
):
del self.compressed_data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
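        # (For example, BITPIX = 16 combined with BZERO = 32768, i.e. 1 << 15,
        # is read back as unsigned 16-bit data.)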
if self._uint and self._orig_bscale == 1:
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _update_header_scale_info(self, dtype=None):
if not self._do_not_scale_image_data and not (
self._orig_bzero == 0 and self._orig_bscale == 1
):
for keyword in ["BSCALE", "BZERO"]:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
with suppress(KeyError):
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header["BITPIX"]
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got {})".format(seed)
)
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
naxis = self._header["ZNAXIS"]
tile_dims = [self._header[f"ZTILE{idx + 1}"] for idx in range(naxis)]
tile_dims.reverse()
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
# The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
csum = first_tile.view(dtype="uint8").sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
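            # (For example, a first tile consisting entirely of zero bytes
            # sums to 0 and therefore yields a seed of 1.)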
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return (
(sum(int(x) for x in math.modf(time.time())) + id(self)) % 10000
) + 1
else:
return seed
|
e8c497358a769afe2f70df3666eb2db047ac4bcf79c4ff6257b80edd15d62323 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import datetime
import os
import sys
import warnings
from contextlib import suppress
from inspect import Parameter, signature
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits.file import _File
from astropy.io.fits.header import Header, _BasicHeader, _DelayedHeader, _pad_length
from astropy.io.fits.util import (
_extract_number,
_free_space_check,
_get_array_mmap,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
decode_ascii,
first,
itersubclasses,
)
from astropy.io.fits.verify import _ErrList, _Verify
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = [
"DELAYED",
# classes
"InvalidHDUException",
"ExtensionHDU",
"NonstandardExtHDU",
]
class _Delayed:
pass
DELAYED = _Delayed()
BITPIX2DTYPE = {
8: "uint8",
16: "int16",
32: "int32",
64: "int64",
-32: "float32",
-64: "float64",
}
"""Maps FITS BITPIX values to Numpy dtype names."""
DTYPE2BITPIX = {
"int8": 8,
"uint8": 8,
"int16": 16,
"uint16": 16,
"int32": 32,
"uint32": 32,
"int64": 64,
"uint64": 64,
"float32": -32,
"float64": -64,
}
"""
Maps Numpy dtype names to FITS BITPIX values (this includes unsigned
integers, with the assumption that the pseudo-unsigned integer convention
will be used in this case).
"""
class InvalidHDUException(Exception):
"""
A custom exception class used mainly to signal to _BaseHDU.__new__ that
an HDU cannot possibly be considered valid, and must be assumed to be
corrupted.
"""
def _hdu_class_from_header(cls, header):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header.
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (
c.__module__.startswith("astropy.io.fits.")
or c in cls._hdu_registry
):
continue
if c.match_header(header):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
"An exception occurred matching an HDU header to the "
"appropriate HDU type: {}".format(exc),
AstropyUserWarning,
)
warnings.warn(
"The HDU will be treated as corrupted.", AstropyUserWarning
)
klass = _CorruptedHDU
del exc
break
return klass
# TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that
# matter)
class _BaseHDU:
"""Base class for all HDU (header data unit) classes."""
_hdu_registry = set()
# This HDU type is part of the FITS standard
_standard = True
# Byte to use for padding out blocks
_padding_byte = "\x00"
_default_name = ""
# _header uses a descriptor to delay the loading of the fits.Header object
# until it is necessary.
_header = _DelayedHeader()
def __init__(self, data=None, header=None, *args, **kwargs):
if header is None:
header = Header()
self._header = header
self._header_str = None
self._file = None
self._buffer = None
self._header_offset = None
self._data_offset = None
self._data_size = None
# This internal variable is used to track whether the data attribute
# still points to the same data array as when the HDU was originally
# created (this does not track whether the data is actually the same
# content-wise)
self._data_replaced = False
self._data_needs_rescale = False
self._new = True
self._output_checksum = False
if "DATASUM" in self._header and "CHECKSUM" not in self._header:
self._output_checksum = "datasum"
elif "CHECKSUM" in self._header:
self._output_checksum = True
def __init_subclass__(cls, **kwargs):
# Add the same data.deleter to all HDUs with a data property.
# It's unfortunate, but there's otherwise no straightforward way
# that a property can inherit setters/deleters of the property of the
# same name on base classes.
data_prop = cls.__dict__.get("data", None)
if isinstance(data_prop, (lazyproperty, property)) and data_prop.fdel is None:
# Don't do anything if the class has already explicitly
# set the deleter for its data property
def data(self):
# The deleter
if self._file is not None and self._data_loaded:
# sys.getrefcount is CPython specific and not on PyPy.
has_getrefcount = hasattr(sys, "getrefcount")
if has_getrefcount:
data_refcount = sys.getrefcount(self.data)
# Manually delete *now* so that FITS_rec.__del__
# cleanup can happen if applicable
del self.__dict__["data"]
# Don't even do this unless the *only* reference to the
# .data array was the one we're deleting by deleting
# this attribute; if any other references to the array
# are hanging around (perhaps the user ran ``data =
# hdu.data``) don't even consider this:
if has_getrefcount and data_refcount == 2:
self._file._maybe_close_mmap()
cls.data = data_prop.deleter(data)
return super().__init_subclass__(**kwargs)
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
return str(self._header.get("EXTNAME", self._default_name))
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if "EXTNAME" in self._header:
self._header["EXTNAME"] = value
else:
self._header["EXTNAME"] = (value, "extension name")
@property
def ver(self):
return self._header.get("EXTVER", 1)
@ver.setter
def ver(self, value):
if not _is_int(value):
raise TypeError("'ver' attribute must be an integer")
if "EXTVER" in self._header:
self._header["EXTVER"] = value
else:
self._header["EXTVER"] = (value, "extension value")
@property
def level(self):
return self._header.get("EXTLEVEL", 1)
@level.setter
def level(self, value):
if not _is_int(value):
raise TypeError("'level' attribute must be an integer")
if "EXTLEVEL" in self._header:
self._header["EXTLEVEL"] = value
else:
self._header["EXTLEVEL"] = (value, "extension level")
@property
def is_image(self):
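        # An HDU counts as an image if it is the primary HDU, a standard IMAGE
        # extension, or a tile-compressed image stored in a BINTABLE extension
        # flagged with ZIMAGE = T.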
return self.name == "PRIMARY" or (
"XTENSION" in self._header
and (
self._header["XTENSION"] == "IMAGE"
or (
self._header["XTENSION"] == "BINTABLE"
and "ZIMAGE" in self._header
and self._header["ZIMAGE"] is True
)
)
)
@property
def _data_loaded(self):
return "data" in self.__dict__ and self.data is not DELAYED
@property
def _has_data(self):
return self._data_loaded and self.data is not None
@classmethod
def register_hdu(cls, hducls):
cls._hdu_registry.add(hducls)
@classmethod
def unregister_hdu(cls, hducls):
if hducls in cls._hdu_registry:
cls._hdu_registry.remove(hducls)
@classmethod
def match_header(cls, header):
raise NotImplementedError
@classmethod
def fromstring(cls, data, checksum=False, ignore_missing_end=False, **kwargs):
"""
Creates a new HDU object of the appropriate type from a string
containing the HDU's entire header and, optionally, its data.
Note: When creating a new HDU from a string without a backing file
object, the data of that HDU may be read-only. It depends on whether
the underlying string was an immutable Python str/bytes object, or some
kind of read-write memory buffer such as a `memoryview`.
Parameters
----------
data : str, bytearray, memoryview, ndarray
A byte string containing the HDU's header and data.
checksum : bool, optional
Check the HDU's checksum and/or datasum.
ignore_missing_end : bool, optional
Ignore a missing end card in the header data. Note that without the
            end card the end of the header may be ambiguous and result in a
            corrupt HDU. In this case the assumption is that the first 2880-byte
block that does not begin with valid FITS header data is the
beginning of the data.
**kwargs : optional
May consist of additional keyword arguments specific to an HDU
type--these correspond to keywords recognized by the constructors of
different HDU classes such as `PrimaryHDU`, `ImageHDU`, or
`BinTableHDU`. Any unrecognized keyword arguments are simply
ignored.
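        Examples
        --------
        A minimal illustrative round trip through an in-memory buffer (the
        array contents are placeholder data):
        >>> import io
        >>> import numpy as np
        >>> from astropy.io import fits
        >>> buf = io.BytesIO()
        >>> fits.PrimaryHDU(data=np.arange(4, dtype=np.int16)).writeto(buf)
        >>> hdu = fits.PrimaryHDU.fromstring(buf.getvalue())
        >>> hdu.header["BITPIX"]
        16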
"""
return cls._readfrom_internal(
data, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs
)
@classmethod
def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False, **kwargs):
"""
Read the HDU from a file. Normally an HDU should be opened with
:func:`open` which reads the entire HDU list in a FITS file. But this
method is still provided for symmetry with :func:`writeto`.
Parameters
----------
fileobj : file-like
Input FITS file. The file's seek pointer is assumed to be at the
beginning of the HDU.
checksum : bool
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card
values (when present in the HDU header) match the header and data
of all HDU's in the file.
ignore_missing_end : bool
Do not issue an exception when opening a file that is missing an
``END`` card in the last header.
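        Examples
        --------
        An illustrative sketch; ``"example.fits"`` is a placeholder path for an
        existing FITS file whose first HDU is a primary HDU:
        >>> from astropy.io import fits
        >>> with open("example.fits", "rb") as f:  # doctest: +SKIP
        ...     hdu = fits.PrimaryHDU.readfrom(f)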
"""
# TODO: Figure out a way to make it possible for the _File
# constructor to be a noop if the argument is already a _File
if not isinstance(fileobj, _File):
fileobj = _File(fileobj)
hdu = cls._readfrom_internal(
fileobj, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs
)
# If the checksum had to be checked the data may have already been read
# from the file, in which case we don't want to seek relative
fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET)
return hdu
def writeto(self, name, output_verify="exception", overwrite=False, checksum=False):
"""
Write the HDU to a new file. This is a convenience method to
provide a user easier output interface if only one HDU needs
to be written to a file.
Parameters
----------
name : path-like or file-like
Output FITS file. If the file object is already opened, it must
be opened in a writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the header of the HDU when written to the file.
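        Examples
        --------
        An illustrative sketch; ``"example.fits"`` is a placeholder output
        path:
        >>> from astropy.io import fits
        >>> fits.PrimaryHDU().writeto("example.fits", overwrite=True)  # doctest: +SKIP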
"""
from .hdulist import HDUList
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum)
@classmethod
def _from_data(cls, data, header, **kwargs):
"""
Instantiate the HDU object after guessing the HDU class from the
FITS Header.
"""
klass = _hdu_class_from_header(cls, header)
return klass(data=data, header=header, **kwargs)
@classmethod
def _readfrom_internal(
cls, data, header=None, checksum=False, ignore_missing_end=False, **kwargs
):
"""
Provides the bulk of the internal implementation for readfrom and
fromstring.
For some special cases, supports using a header that was already
created, and just using the input data for the actual array data.
"""
hdu_buffer = None
hdu_fileobj = None
header_offset = 0
if isinstance(data, _File):
if header is None:
header_offset = data.tell()
try:
# First we try to read the header with the fast parser
# from _BasicHeader, which will read only the standard
# 8 character keywords to get the structural keywords
# that are needed to build the HDU object.
header_str, header = _BasicHeader.fromfile(data)
except Exception:
# If the fast header parsing failed, then fallback to
# the classic Header parser, which has better support
# and reporting for the various issues that can be found
# in the wild.
data.seek(header_offset)
header = Header.fromfile(data, endcard=not ignore_missing_end)
hdu_fileobj = data
data_offset = data.tell() # *after* reading the header
else:
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype="ubyte", buffer=data)
except TypeError:
raise TypeError(
f"The provided object {data!r} does not contain an underlying "
"memory buffer. fromstring() requires an object that "
"supports the buffer interface such as bytes, buffer, "
"memoryview, ndarray, etc. This restriction is to ensure "
"that efficient access to the array/table data is possible."
)
if header is None:
def block_iter(nbytes):
idx = 0
while idx < len(data):
yield data[idx : idx + nbytes]
idx += nbytes
header_str, header = Header._from_blocks(
block_iter, True, "", not ignore_missing_end, True
)
if len(data) > len(header_str):
hdu_buffer = data
elif data:
hdu_buffer = data
header_offset = 0
data_offset = len(header_str)
# Determine the appropriate arguments to pass to the constructor from
# self._kwargs. self._kwargs contains any number of optional arguments
# that may or may not be valid depending on the HDU type
cls = _hdu_class_from_header(cls, header)
sig = signature(cls.__init__)
new_kwargs = kwargs.copy()
if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()):
# If __init__ accepts arbitrary keyword arguments, then we can go
# ahead and pass all keyword arguments; otherwise we need to delete
# any that are invalid
for key in kwargs:
if key not in sig.parameters:
del new_kwargs[key]
try:
hdu = cls(data=DELAYED, header=header, **new_kwargs)
except TypeError:
# This may happen because some HDU class (e.g. GroupsHDU) wants
# to set a keyword on the header, which is not possible with the
# _BasicHeader. While HDU classes should not need to modify the
# header in general, sometimes this is needed to fix it. So in
# this case we build a full Header and try again to create the
# HDU object.
if isinstance(header, _BasicHeader):
header = Header.fromstring(header_str)
hdu = cls(data=DELAYED, header=header, **new_kwargs)
else:
raise
# One of these may be None, depending on whether the data came from a
# file or a string buffer--later this will be further abstracted
hdu._file = hdu_fileobj
hdu._buffer = hdu_buffer
hdu._header_offset = header_offset # beginning of the header area
hdu._data_offset = data_offset # beginning of the data area
# data area size, including padding
size = hdu.size
hdu._data_size = size + _pad_length(size)
if isinstance(hdu._header, _BasicHeader):
# Delete the temporary _BasicHeader.
# We need to do this before an eventual checksum computation,
            # since it needs to temporarily modify the header
#
# The header string is stored in the HDU._header_str attribute,
# so that it can be used directly when we need to create the
# classic Header object, without having to parse again the file.
del hdu._header
hdu._header_str = header_str
# Checksums are not checked on invalid HDU types
if checksum and checksum != "remove" and isinstance(hdu, _ValidHDU):
hdu._verify_checksum_datasum()
return hdu
def _get_raw_data(self, shape, code, offset):
"""
Return raw array from either the HDU's memory buffer or underlying
file.
"""
if isinstance(shape, int):
shape = (shape,)
if self._buffer:
return np.ndarray(shape, dtype=code, buffer=self._buffer, offset=offset)
elif self._file:
return self._file.readarray(offset=offset, dtype=code, shape=shape)
else:
return None
# TODO: Rework checksum handling so that it's not necessary to add a
# checksum argument here
# TODO: The BaseHDU class shouldn't even handle checksums since they're
# only implemented on _ValidHDU...
def _prewriteto(self, checksum=False, inplace=False):
self._update_pseudo_int_scale_keywords()
# Handle checksum
self._update_checksum(checksum)
def _update_pseudo_int_scale_keywords(self):
"""
If the data is signed int 8, unsigned int 16, 32, or 64,
add BSCALE/BZERO cards to header.
"""
if self._has_data and self._standard and _is_pseudo_integer(self.data.dtype):
# CompImageHDUs need TFIELDS immediately after GCOUNT,
# so BSCALE has to go after TFIELDS if it exists.
if "TFIELDS" in self._header:
self._header.set("BSCALE", 1, after="TFIELDS")
elif "GCOUNT" in self._header:
self._header.set("BSCALE", 1, after="GCOUNT")
else:
self._header.set("BSCALE", 1)
self._header.set("BZERO", _pseudo_zero(self.data.dtype), after="BSCALE")
def _update_checksum(
self, checksum, checksum_keyword="CHECKSUM", datasum_keyword="DATASUM"
):
"""Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or
keywords with equivalent semantics given by the ``checksum_keyword``
and ``datasum_keyword`` arguments--see for example ``CompImageHDU``
for an example of why this might need to be overridden).
"""
# If the data is loaded it isn't necessarily 'modified', but we have no
# way of knowing for sure
modified = self._header._modified or self._data_loaded
if checksum == "remove":
if checksum_keyword in self._header:
del self._header[checksum_keyword]
if datasum_keyword in self._header:
del self._header[datasum_keyword]
elif (
modified
or self._new
or (
checksum
and (
"CHECKSUM" not in self._header
or "DATASUM" not in self._header
or not self._checksum_valid
or not self._datasum_valid
)
)
):
if checksum == "datasum":
self.add_datasum(datasum_keyword=datasum_keyword)
elif checksum:
self.add_checksum(
checksum_keyword=checksum_keyword, datasum_keyword=datasum_keyword
)
def _postwriteto(self):
# If data is unsigned integer 16, 32 or 64, remove the
# BSCALE/BZERO cards
if self._has_data and self._standard and _is_pseudo_integer(self.data.dtype):
for keyword in ("BSCALE", "BZERO"):
with suppress(KeyError):
del self._header[keyword]
def _writeheader(self, fileobj):
offset = 0
with suppress(AttributeError, OSError):
offset = fileobj.tell()
self._header.tofile(fileobj)
try:
size = fileobj.tell() - offset
except (AttributeError, OSError):
size = len(str(self._header))
return offset, size
def _writedata(self, fileobj):
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except (AttributeError, OSError):
offset = 0
if self._data_loaded or self._data_needs_rescale:
if self.data is not None:
size += self._writedata_internal(fileobj)
# pad the FITS data block
# to avoid a bug in the lustre filesystem client, don't
# write zero-byte objects
if size > 0 and _pad_length(size) > 0:
padding = _pad_length(size) * self._padding_byte
# TODO: Not that this is ever likely, but if for some odd
# reason _padding_byte is > 0x80 this will fail; but really if
# somebody's custom fits format is doing that, they're doing it
# wrong and should be reprimanded harshly.
fileobj.write(padding.encode("ascii"))
size += len(padding)
else:
            # The data has not been modified or does not need to be
# rescaled, so it can be copied, unmodified, directly from an
# existing file or buffer
size += self._writedata_direct_copy(fileobj)
# flush, to make sure the content is written
fileobj.flush()
# return both the location and the size of the data area
return offset, size
def _writedata_internal(self, fileobj):
"""
The beginning and end of most _writedata() implementations are the
same, but the details of writing the data array itself can vary between
HDU types, so that should be implemented in this method.
Should return the size in bytes of the data written.
"""
fileobj.writearray(self.data)
return self.data.size * self.data.itemsize
def _writedata_direct_copy(self, fileobj):
"""Copies the data directly from one file/buffer to the new file.
For now this is handled by loading the raw data from the existing data
(including any padding) via a memory map or from an already in-memory
buffer and using Numpy's existing file-writing facilities to write to
the new file.
If this proves too slow a more direct approach may be used.
"""
raw = self._get_raw_data(self._data_size, "ubyte", self._data_offset)
if raw is not None:
fileobj.writearray(raw)
return raw.nbytes
else:
return 0
# TODO: This is the start of moving HDU writing out of the _File class;
# Though right now this is an internal private method (though still used by
    # HDUList), eventually the plan is to have this be moved into writeto()
# somehow...
def _writeto(self, fileobj, inplace=False, copy=False):
try:
dirname = os.path.dirname(fileobj._file.name)
except (AttributeError, TypeError):
dirname = None
with _free_space_check(self, dirname):
self._writeto_internal(fileobj, inplace, copy)
def _writeto_internal(self, fileobj, inplace, copy):
# For now fileobj is assumed to be a _File object
if not inplace or self._new:
header_offset, _ = self._writeheader(fileobj)
data_offset, data_size = self._writedata(fileobj)
# Set the various data location attributes on newly-written HDUs
if self._new:
self._header_offset = header_offset
self._data_offset = data_offset
self._data_size = data_size
return
hdrloc = self._header_offset
hdrsize = self._data_offset - self._header_offset
datloc = self._data_offset
datsize = self._data_size
if self._header._modified:
# Seek to the original header location in the file
self._file.seek(hdrloc)
            # This should update hdrloc with the header location in the new file
hdrloc, hdrsize = self._writeheader(fileobj)
# If the data is to be written below with self._writedata, that
# will also properly update the data location; but it should be
# updated here too
datloc = hdrloc + hdrsize
elif copy:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# Before writing, update the hdrloc with the current file position,
# which is the hdrloc for the new file
hdrloc = fileobj.tell()
fileobj.write(self._file.read(hdrsize))
# The header size is unchanged, but the data location may be
# different from before depending on if previous HDUs were resized
datloc = fileobj.tell()
if self._data_loaded:
if self.data is not None:
                # Seek through the array's bases for a memmap'd array; we
# can't rely on the _File object to give us this info since
# the user may have replaced the previous mmap'd array
if copy or self._data_replaced:
# Of course, if we're copying the data to a new file
# we don't care about flushing the original mmap;
# instead just read it into the new file
array_mmap = None
else:
array_mmap = _get_array_mmap(self.data)
if array_mmap is not None:
array_mmap.flush()
else:
self._file.seek(self._data_offset)
datloc, datsize = self._writedata(fileobj)
elif copy:
datsize = self._writedata_direct_copy(fileobj)
self._header_offset = hdrloc
self._data_offset = datloc
self._data_size = datsize
self._data_replaced = False
def _close(self, closed=True):
# If the data was mmap'd, close the underlying mmap (this will
# prevent any future access to the .data attribute if there are
        # no other references to it; if there are other references then
        # it is up to the user to clean those up)
if closed and self._data_loaded and _get_array_mmap(self.data) is not None:
del self.data
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
# For convenience...
# TODO: register_hdu could be made into a class decorator which would be pretty
# cool, but only once 2.6 support is dropped.
register_hdu = _BaseHDU.register_hdu
unregister_hdu = _BaseHDU.unregister_hdu
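# Illustrative usage: a downstream package that defines a custom HDU subclass
# implementing ``match_header`` can make it recognized by the reader with
# ``register_hdu(MyCustomHDU)`` and remove it again with
# ``unregister_hdu(MyCustomHDU)`` (``MyCustomHDU`` is a hypothetical class).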
class _CorruptedHDU(_BaseHDU):
"""
A Corrupted HDU class.
This class is used when one or more mandatory `Card`s are
corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or
``END`` cards. A corrupted HDU usually means that the data size
cannot be calculated or the ``END`` card is not found. In the case
of a missing ``END`` card, the `Header` may also contain the binary
    data.
.. note::
In future, it may be possible to decipher where the last block
of the `Header` ends, but this task may be difficult when the
extension is a `TableHDU` containing ASCII data.
"""
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
# Note: On compressed files this might report a negative size; but the
# file is corrupt anyways so I'm not too worried about it.
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _summary(self):
return (self.name, self.ver, "CorruptedHDU")
def verify(self):
pass
class _NonstandardHDU(_BaseHDU, _Verify):
"""
A Non-standard HDU class.
This class is used for a Primary HDU when the ``SIMPLE`` Card has
a value of `False`. A non-standard HDU comes from a file that
resembles a FITS file but departs from the standards in some
significant way. One example would be files where the numbers are
in the DEC VAX internal storage format rather than the standard
FITS most significant byte first. The header for this HDU should
be valid. The data for this HDU is read from the file as a byte
stream that begins at the first byte after the header ``END`` card
and continues until the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any HDU that has the 'SIMPLE' keyword but is not a standard
Primary or Groups HDU.
"""
# The SIMPLE keyword must be in the first card
card = header.cards[0]
# The check that 'GROUPS' is missing is a bit redundant, since the
# match_header for GroupsHDU will always be called before this one.
if card.keyword == "SIMPLE":
if "GROUPS" not in header and card.value is False:
return True
else:
raise InvalidHDUException
else:
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _writedata(self, fileobj):
"""
        Differs from the base class ``_writedata()`` in that it doesn't
automatically add padding, and treats the data as a string of raw bytes
instead of an array.
"""
offset = 0
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self.data is not None:
fileobj.write(self.data)
# flush, to make sure the content is written
fileobj.flush()
size = len(self.data)
# return both the location and the size of the data area
return offset, size
def _summary(self):
return (self.name, self.ver, "NonstandardHDU", len(self._header))
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, "ubyte", self._data_offset)
def _verify(self, option="warn"):
errs = _ErrList([], unit="Card")
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
class _ValidHDU(_BaseHDU, _Verify):
"""
Base class for all HDUs which are not corrupted.
"""
def __init__(self, data=None, header=None, name=None, ver=None, **kwargs):
super().__init__(data=data, header=header)
if header is not None and not isinstance(header, (Header, _BasicHeader)):
# TODO: Instead maybe try initializing a new Header object from
# whatever is passed in as the header--there are various types
# of objects that could work for this...
raise ValueError("header must be a Header object")
# NOTE: private data members _checksum and _datasum are used by the
# utility script "fitscheck" to detect missing checksums.
self._checksum = None
self._checksum_valid = None
self._datasum = None
self._datasum_valid = None
if name is not None:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
Matches any HDU that is not recognized as having either the SIMPLE or
XTENSION keyword in its header's first card, but is nonetheless not
corrupted.
TODO: Maybe it would make more sense to use _NonstandardHDU in this
case? Not sure...
"""
return first(header.keys()) not in ("SIMPLE", "XTENSION")
@property
def size(self):
"""
Size (in bytes) of the data portion of the HDU.
"""
return self._header.data_size
def filebytes(self):
"""
Calculates and returns the number of bytes that this HDU will write to
a file.
"""
f = _File()
# TODO: Fix this once new HDU writing API is settled on
return self._writeheader(f)[1] + self._writedata(f)[1]
def fileinfo(self):
"""
Returns a dictionary detailing information about the locations
of this HDU within any associated file. The values are only
valid after a read or write of the associated file with no
intervening changes to the `HDUList`.
Returns
-------
dict or None
The dictionary details information about the locations of
this HDU within an associated file. Returns `None` when
the HDU is not associated with a file.
Dictionary contents:
========== ================================================
Key Value
========== ================================================
file File object associated with the HDU
filemode Mode in which the file was opened (readonly, copyonwrite,
update, append, ostream)
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ================================================
"""
if hasattr(self, "_file") and self._file:
return {
"file": self._file,
"filemode": self._file.mode,
"hdrLoc": self._header_offset,
"datLoc": self._data_offset,
"datSpan": self._data_size,
}
else:
return None
def copy(self):
"""
Make a copy of the HDU, both header and data are copied.
"""
if self.data is not None:
data = self.data.copy()
else:
data = None
return self.__class__(data=data, header=self._header.copy())
def _verify(self, option="warn"):
errs = _ErrList([], unit="Card")
is_valid = BITPIX2DTYPE.__contains__
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes, so
# the checking is in order, in case of required cards in wrong order.
if isinstance(self, ExtensionHDU):
firstkey = "XTENSION"
firstval = self._extension
else:
firstkey = "SIMPLE"
firstval = True
self.req_cards(firstkey, 0, None, firstval, option, errs)
self.req_cards(
"BITPIX", 1, lambda v: (_is_int(v) and is_valid(v)), 8, option, errs
)
self.req_cards(
"NAXIS", 2, lambda v: (_is_int(v) and 0 <= v <= 999), 0, option, errs
)
naxis = self._header.get("NAXIS", 0)
if naxis < 1000:
for ax in range(3, naxis + 3):
key = "NAXIS" + str(ax - 2)
self.req_cards(
key,
ax,
lambda v: (_is_int(v) and v >= 0),
_extract_number(self._header[key], default=1),
option,
errs,
)
# Remove NAXISj cards where j is not in range 1, naxis inclusive.
for keyword in self._header:
if keyword.startswith("NAXIS") and len(keyword) > 5:
try:
number = int(keyword[5:])
if number <= 0 or number > naxis:
raise ValueError
except ValueError:
err_text = (
"NAXISj keyword out of range ('{}' when "
"NAXIS == {})".format(keyword, naxis)
)
def fix(self=self, keyword=keyword):
del self._header[keyword]
errs.append(
self.run_option(
option=option,
err_text=err_text,
fix=fix,
fix_text="Deleted.",
)
)
# Verify that the EXTNAME keyword exists and is a string
if "EXTNAME" in self._header:
if not isinstance(self._header["EXTNAME"], str):
err_text = "The EXTNAME keyword must have a string value."
fix_text = "Converted the EXTNAME keyword to a string value."
def fix(header=self._header):
header["EXTNAME"] = str(header["EXTNAME"])
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
# TODO: Improve this API a little bit--for one, most of these arguments
# could be optional
def req_cards(self, keyword, pos, test, fix_value, option, errlist):
"""
Check the existence, location, and value of a required `Card`.
Parameters
----------
keyword : str
The keyword to validate
pos : int, callable
If an ``int``, this specifies the exact location this card should
have in the header. Remember that Python is zero-indexed, so this
means ``pos=0`` requires the card to be the first card in the
header. If given a callable, it should take one argument--the
actual position of the keyword--and return `True` or `False`. This
can be used for custom evaluation. For example if
``pos=lambda idx: idx > 10`` this will check that the keyword's
index is greater than 10.
test : callable
This should be a callable (generally a function) that is passed the
value of the given keyword and returns `True` or `False`. This can
be used to validate the value associated with the given keyword.
fix_value : str, int, float, complex, bool, None
            A valid value for a FITS keyword to use if the given ``test``
fails to replace an invalid value. In other words, this provides
a default value to use as a replacement if the keyword's current
value is invalid. If `None`, there is no replacement value and the
keyword is unfixable.
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
errlist : list
A list of validation errors already found in the FITS file; this is
used primarily for the validation system to collect errors across
multiple HDUs and multiple calls to `req_cards`.
Notes
-----
If ``pos=None``, the card can be anywhere in the header. If the card
does not exist, the new card will have the ``fix_value`` as its value
when created. Also check the card's value by using the ``test``
argument.
"""
errs = errlist
fix = None
try:
index = self._header.index(keyword)
except ValueError:
index = None
fixable = fix_value is not None
insert_pos = len(self._header) + 1
# If pos is an int, insert at the given position (and convert it to a
# lambda)
if _is_int(pos):
insert_pos = pos
pos = lambda x: x == insert_pos
# if the card does not exist
if index is None:
err_text = f"'{keyword}' card does not exist."
fix_text = f"Fixed by inserting a new '{keyword}' card."
if fixable:
# use repr to accommodate both string and non-string types
# Boolean is also OK in this constructor
card = (keyword, fix_value)
def fix(self=self, insert_pos=insert_pos, card=card):
self._header.insert(insert_pos, card)
errs.append(
self.run_option(
option,
err_text=err_text,
fix_text=fix_text,
fix=fix,
fixable=fixable,
)
)
else:
# if the supposed location is specified
if pos is not None:
if not pos(index):
err_text = f"'{keyword}' card at the wrong place (card {index})."
fix_text = (
f"Fixed by moving it to the right place (card {insert_pos})."
)
def fix(self=self, index=index, insert_pos=insert_pos):
card = self._header.cards[index]
del self._header[index]
self._header.insert(insert_pos, card)
errs.append(
self.run_option(
option, err_text=err_text, fix_text=fix_text, fix=fix
)
)
# if value checking is specified
if test:
val = self._header[keyword]
if not test(val):
err_text = f"'{keyword}' card has invalid value '{val}'."
fix_text = f"Fixed by setting a new value '{fix_value}'."
if fixable:
def fix(self=self, keyword=keyword, val=fix_value):
self._header[keyword] = fix_value
errs.append(
self.run_option(
option,
err_text=err_text,
fix_text=fix_text,
fix=fix,
fixable=fixable,
)
)
return errs
def add_datasum(self, when=None, datasum_keyword="DATASUM"):
"""
Add the ``DATASUM`` card to this HDU with the value set to the
checksum calculated for the data.
Parameters
----------
when : str, optional
Comment string for the card that by default represents the
time when the checksum was calculated
datasum_keyword : str, optional
The name of the header keyword to store the datasum value in;
this is typically 'DATASUM' per convention, but there exist
use cases in which a different keyword should be used
Returns
-------
checksum : int
The calculated datasum
Notes
-----
For testing purposes, provide a ``when`` argument to enable the comment
value in the card to remain consistent. This will enable the
generation of a ``CHECKSUM`` card with a consistent value.
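
        Examples
        --------
        A minimal sketch, assuming ``hdu`` is an existing HDU instance::

            datasum = hdu.add_datasum(when="consistent comment for tests")
            assert hdu.header["DATASUM"] == str(datasum)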
"""
cs = self._calculate_datasum()
if when is None:
when = f"data unit checksum updated {self._get_timestamp()}"
self._header[datasum_keyword] = (str(cs), when)
return cs
def add_checksum(
self,
when=None,
override_datasum=False,
checksum_keyword="CHECKSUM",
datasum_keyword="DATASUM",
):
"""
Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with
the values set to the checksum calculated for the HDU and the
data respectively. The addition of the ``DATASUM`` card may
be overridden.
Parameters
----------
when : str, optional
comment string for the cards; by default the comments
will represent the time when the checksum was calculated
override_datasum : bool, optional
add the ``CHECKSUM`` card only
checksum_keyword : str, optional
The name of the header keyword to store the checksum value in; this
is typically 'CHECKSUM' per convention, but there exist use cases
in which a different keyword should be used
datasum_keyword : str, optional
See ``checksum_keyword``
Notes
-----
For testing purposes, first call `add_datasum` with a ``when``
argument, then call `add_checksum` with a ``when`` argument and
``override_datasum`` set to `True`. This will provide consistent
comments for both cards and enable the generation of a ``CHECKSUM``
card with a consistent value.
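
        Examples
        --------
        A sketch of the testing recipe described in the notes above, assuming
        ``hdu`` is an existing HDU instance::

            hdu.add_datasum(when="consistent comment")
            hdu.add_checksum(when="consistent comment", override_datasum=True)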
"""
if not override_datasum:
# Calculate and add the data checksum to the header.
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword)
else:
# Just calculate the data checksum
data_cs = self._calculate_datasum()
if when is None:
when = f"HDU checksum updated {self._get_timestamp()}"
# Add the CHECKSUM card to the header with a value of all zeros.
if datasum_keyword in self._header:
self._header.set(checksum_keyword, "0" * 16, when, before=datasum_keyword)
else:
self._header.set(checksum_keyword, "0" * 16, when)
csum = self._calculate_checksum(data_cs, checksum_keyword=checksum_keyword)
self._header[checksum_keyword] = csum
def verify_datasum(self):
"""
Verify that the value in the ``DATASUM`` keyword matches the value
calculated for the ``DATASUM`` of the current HDU data.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``DATASUM`` keyword present
"""
if "DATASUM" in self._header:
datasum = self._calculate_datasum()
if datasum == int(self._header["DATASUM"]):
return 1
else:
# Failed
return 0
else:
return 2
def verify_checksum(self):
"""
Verify that the value in the ``CHECKSUM`` keyword matches the
value calculated for the current HDU CHECKSUM.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``CHECKSUM`` keyword present
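
        Examples
        --------
        A minimal sketch of interpreting the return code, assuming ``hdu`` was
        read from a file written with ``checksum=True``::

            status = hdu.verify_checksum()
            if status == 0:
                print("CHECKSUM does not match the HDU")
            elif status == 2:
                print("no CHECKSUM keyword present")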
"""
if "CHECKSUM" in self._header:
if "DATASUM" in self._header:
datasum = self._calculate_datasum()
else:
datasum = 0
checksum = self._calculate_checksum(datasum)
if checksum == self._header["CHECKSUM"]:
return 1
else:
# Failed
return 0
else:
return 2
def _verify_checksum_datasum(self):
"""
Verify the checksum/datasum values if the cards exist in the header.
        Simply displays warnings if either the checksum or datasum doesn't match.
"""
if "CHECKSUM" in self._header:
self._checksum = self._header["CHECKSUM"]
self._checksum_valid = self.verify_checksum()
if not self._checksum_valid:
warnings.warn(
f"Checksum verification failed for HDU {self.name, self.ver}.\n",
AstropyUserWarning,
)
if "DATASUM" in self._header:
self._datasum = self._header["DATASUM"]
self._datasum_valid = self.verify_datasum()
if not self._datasum_valid:
warnings.warn(
f"Datasum verification failed for HDU {self.name, self.ver}.\n",
AstropyUserWarning,
)
def _get_timestamp(self):
"""
Return the current timestamp in ISO 8601 format, with microseconds
stripped off.
Ex.: 2007-05-30T19:05:11
"""
return datetime.datetime.now().isoformat()[:19]
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if not self._data_loaded:
# This is the case where the data has not been read from the file
# yet. We find the data in the file, read it, and calculate the
# datasum.
if self.size > 0:
raw_data = self._get_raw_data(
self._data_size, "ubyte", self._data_offset
)
return self._compute_checksum(raw_data)
else:
return 0
elif self.data is not None:
return self._compute_checksum(self.data.view("ubyte"))
else:
return 0
def _calculate_checksum(self, datasum, checksum_keyword="CHECKSUM"):
"""
Calculate the value of the ``CHECKSUM`` card in the HDU.
"""
old_checksum = self._header[checksum_keyword]
self._header[checksum_keyword] = "0" * 16
# Convert the header to bytes.
s = self._header.tostring().encode("utf8")
# Calculate the checksum of the Header and data.
cs = self._compute_checksum(np.frombuffer(s, dtype="ubyte"), datasum)
# Encode the checksum into a string.
s = self._char_encode(~cs)
# Return the header card value.
self._header[checksum_keyword] = old_checksum
return s
def _compute_checksum(self, data, sum32=0):
"""
Compute the ones-complement checksum of a sequence of bytes.
Parameters
----------
data
a memory region to checksum
sum32
incremental checksum value from another region
Returns
-------
ones complement checksum
"""
blocklen = 2880
sum32 = np.uint32(sum32)
for i in range(0, len(data), blocklen):
            length = min(blocklen, len(data) - i)  # last block may be shorter than 2880 bytes
sum32 = self._compute_hdu_checksum(data[i : i + length], sum32)
return sum32
def _compute_hdu_checksum(self, data, sum32=0):
"""
Translated from FITS Checksum Proposal by Seaman, Pence, and Rots.
Use uint32 literals as a hedge against type promotion to int64.
        This code should only be called with blocks of 2880 bytes.
        Longer blocks result in non-standard checksums with carry overflow.
        Historically, this code *was* called with larger blocks, and for that
        reason it still needs to support them for backward compatibility.
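        As a worked sketch of the carry folding: if at some point
        ``hi = 0x10002`` and ``lo = 0x3``, then ``hicarry = 1`` and
        ``locarry = 0``, so the next pass gives ``hi = 0x0002`` and
        ``lo = 0x0004``, and the final result is
        ``(hi << 16) + lo == 0x00020004``.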
"""
u8 = np.uint32(8)
u16 = np.uint32(16)
uFFFF = np.uint32(0xFFFF)
if data.nbytes % 2:
last = data[-1]
data = data[:-1]
else:
last = np.uint32(0)
data = data.view(">u2")
hi = sum32 >> u16
lo = sum32 & uFFFF
hi += np.add.reduce(data[0::2], dtype=np.uint64)
lo += np.add.reduce(data[1::2], dtype=np.uint64)
if (data.nbytes // 2) % 2:
lo += last << u8
else:
hi += last << u8
hicarry = hi >> u16
locarry = lo >> u16
while hicarry or locarry:
hi = (hi & uFFFF) + locarry
lo = (lo & uFFFF) + hicarry
hicarry = hi >> u16
locarry = lo >> u16
return (hi << u16) + lo
# _MASK and _EXCLUDE used for encoding the checksum value into a character
# string.
_MASK = [0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF]
_EXCLUDE = [0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40,
0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60] # fmt: skip
def _encode_byte(self, byte):
"""
Encode a single byte.
"""
quotient = byte // 4 + ord("0")
remainder = byte % 4
ch = np.array(
[(quotient + remainder), quotient, quotient, quotient], dtype="int32"
)
check = True
while check:
check = False
for x in self._EXCLUDE:
for j in [0, 2]:
if ch[j] == x or ch[j + 1] == x:
ch[j] += 1
ch[j + 1] -= 1
check = True
return ch
def _char_encode(self, value):
"""
Encodes the checksum ``value`` using the algorithm described
in SPR section A.7.2 and returns it as a 16 character string.
Parameters
----------
value
a checksum
Returns
-------
ascii encoded checksum
"""
value = np.uint32(value)
asc = np.zeros((16,), dtype="byte")
ascii = np.zeros((16,), dtype="byte")
for i in range(4):
byte = (value & self._MASK[i]) >> ((3 - i) * 8)
ch = self._encode_byte(byte)
for j in range(4):
asc[4 * j + i] = ch[j]
for i in range(16):
ascii[i] = asc[(i + 15) % 16]
return decode_ascii(ascii.tobytes())
class ExtensionHDU(_ValidHDU):
"""
An extension HDU class.
This class is the base class for the `TableHDU`, `ImageHDU`, and
`BinTableHDU` classes.
"""
_extension = ""
@classmethod
def match_header(cls, header):
"""
This class should never be instantiated directly. Either a standard
extension HDU type should be used for a specific extension, or
NonstandardExtHDU should be used.
"""
raise NotImplementedError
def writeto(self, name, output_verify="exception", overwrite=False, checksum=False):
"""
        Works similarly to the normal writeto(), but prepends a default
        `PrimaryHDU` as required by extension HDUs (which cannot stand on
        their own).
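
        Examples
        --------
        A minimal sketch, using `ImageHDU` (a concrete extension HDU) and a
        hypothetical output path::

            from astropy.io import fits

            hdu = fits.ImageHDU(data=None, name="SCI")
            hdu.writeto("ext.fits")  # file contains a PrimaryHDU plus this HDU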
"""
from .hdulist import HDUList
from .image import PrimaryHDU
hdulist = HDUList([PrimaryHDU(), self])
hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"PCOUNT", naxis + 3, lambda v: (_is_int(v) and v >= 0), 0, option, errs
)
self.req_cards(
"GCOUNT", naxis + 4, lambda v: (_is_int(v) and v == 1), 1, option, errs
)
return errs
# For backwards compatibility, though this needs to be deprecated
# TODO: Mark this as deprecated
_ExtensionHDU = ExtensionHDU
class NonstandardExtHDU(ExtensionHDU):
"""
A Non-standard Extension HDU class.
This class is used for an Extension HDU when the ``XTENSION``
`Card` has a non-standard value. In this case, Astropy can figure
out how big the data is but not what it is. The data for this HDU
is read from the file as a byte stream that begins at the first
byte after the header ``END`` card and continues until the
beginning of the next header or the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any extension HDU that is not one of the standard extension HDU
types.
"""
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
# A3DTABLE is not really considered a 'standard' extension, as it was
# sort of the prototype for BINTABLE; however, since our BINTABLE
# implementation handles A3DTABLE HDUs it is listed here.
standard_xtensions = ("IMAGE", "TABLE", "BINTABLE", "A3DTABLE")
# The check that xtension is not one of the standard types should be
# redundant.
return card.keyword == "XTENSION" and xtension not in standard_xtensions
def _summary(self):
axes = tuple(self.data.shape)
return (self.name, self.ver, "NonstandardExtHDU", len(self._header), axes)
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, "ubyte", self._data_offset)
# TODO: Mark this as deprecated
_NonstandardExtHDU = NonstandardExtHDU
723ec0036e7099dced2910f1323eeae9319a3cee8562867067a9aec841a4f58d
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import numpy as np
import pytest
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from astropy.table import Table
from astropy.units import Unit, UnitsWarning, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
    expected amount. Return True if consistent, False if any differences.
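
    Examples
    --------
    A minimal sketch of the intended use; the values are chosen so the
    relative difference stays below the tolerance::

        a = np.array([1.0, 2.0], dtype=np.float32)
        b = np.array([1.0, 2.000001], dtype=np.float32)
        assert comparefloats(a, b)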
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
    Compare two record arrays.
    Does this field by field, using approximation testing for float columns
    (complex not yet handled). Column names are not compared, but column
    types and sizes are.
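
    Examples
    --------
    A minimal sketch, mirroring how the tests below use it; ``tbhdu`` is
    assumed to be a table HDU whose data matches ``expected``::

        expected = np.rec.array([(1, "abc")], names="c1, c2")
        assert comparerecords(tbhdu.data, expected)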
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f"fielda[{row}]: {fielda[row]}")
print(f"fieldb[{row}]: {fieldb[row]}")
print(f"field {i} differs in row {row}")
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr["FILENAME"] = "labq01i3q_rawtag.fits"
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert thdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self, home_is_data):
# open some existing FITS files:
tt = fits.open(self.data("tb.fits"))
fd = fits.open(self.data("test0.fits"))
# create some local arrays
a1 = chararray.array(["abc", "def", "xx"])
r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name="abc", format="3A", array=a1)
c2 = fits.Column(name="def", format="E", array=r1)
a3 = np.array([3, 4, 5], dtype="i2")
c3 = fits.Column(name="xyz", format="I", array=a3)
a4 = np.array([1, 2, 3], dtype="i2")
c4 = fits.Column(name="t1", format="I", array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8")
c5 = fits.Column(name="t2", format="C", array=a5)
        # Note that the X format must be a two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name="t3", format="X", array=a6)
a7 = np.array([101, 102, 103], dtype="i4")
c7 = fits.Column(name="t4", format="J", array=a7)
a8 = np.array(
[
[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
)
c8 = fits.Column(name="t5", format="11X", array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view("bool")).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp("tableout1.fits"), overwrite=True)
with fits.open(self.temp("tableout1.fits")) as f2:
exp = [True, True, False, True, False, True, True, True, False, False, True]
temp = f2[1].data.field(7)
assert (temp[0] == exp).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp("tableout2.fits"), "append")
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data("tb.fits"))
assert t[1].header["tform1"] == "1J"
info = {
"name": ["c1", "c2", "c3", "c4"],
"format": ["1J", "3A", "1E", "1L"],
"unit": ["", "", "", ""],
"null": [-2147483647, "", "", ""],
"bscale": ["", "", 3, ""],
"bzero": ["", "", 0.4, ""],
"disp": ["I11", "A3", "G15.7", "L6"],
"start": ["", "", "", ""],
"dim": ["", "", "", ""],
"coord_inc": ["", "", "", ""],
"coord_type": ["", "", "", ""],
"coord_unit": ["", "", "", ""],
"coord_ref_point": ["", "", "", ""],
"coord_ref_value": ["", "", "", ""],
"time_ref_pos": ["", "", "", ""],
}
assert t[1].columns.info(output=False) == info
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field("c4")[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]"
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data("ascii.fits"))
ra1 = np.rec.array(
[
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345),
],
names="c1, c2",
)
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names="c1, c2")
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array(
[(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)],
names="c1, c2",
)
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(["abcd", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", start=19, array=a1)
c2 = fits.Column(name="def", format="E", start=3, array=r1)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert dict(hdu.data.dtype.fields) == {
"abc": (np.dtype("|S3"), 18),
"def": (np.dtype("|S15"), 2),
"t1": (np.dtype("|S10"), 21),
}
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11.0, 12.0])
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with open(self.temp("toto.fits")) as f:
assert "4.95652173913043548D+00" in f.read()
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93])
c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93])
c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype="uint8")
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
hduL = fits.open(self.temp("testendian.fits"))
rfiHDU = hduL["RFI"]
data = rfiHDU.data
channelsOut = data.field("Channels")[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1.0, 2.0, 3.0, 4.0]
a1 = np.array(a, dtype="<f8")
a2 = np.array(a, dtype=">f8")
col1 = fits.Column(name="a", format="D", array=a1)
col2 = fits.Column(name="b", format="D", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data["a"] == a1).all()
assert (tbhdu.data["b"] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
with fits.open(self.temp("testendian.fits")) as hdul:
assert (hdul[1].data["a"] == a2).all()
assert (hdul[1].data["b"] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "S20", "float32", "S10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "U20", "float32", "U10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
        # Ensure I can change the value of one data element and it affects
        # all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == "Serius"
assert hdu.data[1][1] == "Canopys"
assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == "A1V"
assert hdu.data[1][3] == "F0Ib"
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == "Serius"
assert hdul[1].data[1][1] == "Canopys"
assert (
hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)
).all()
assert hdul[1].data[0][3] == "A1V"
assert hdul[1].data[1][3] == "F0Ib"
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array(
[(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data("tb.fits")) as h:
data = h[1].data
new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith("FITS_rec(")
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp("newtable.fits"))
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
("NGC5", 412, "", z, False),
("NGC6", 434, "", z, True),
("NGC7", 408, "", z, False),
("NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
        # Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
col = fits.Column(name="a", array=np.array([1, 2]), format="K")
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ["target", "V_mag", "a"]
array = np.rec.array(
[("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
formats="a20,f4,i8",
)
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing a column from a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
tbhdu.columns.del_col("flag")
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z),
("NGC2", 334, "", z),
("NGC3", 308, "", z),
("NCG4", 317, "", z),
],
formats="a10,u4,a10,5f4",
)
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col("counts")
tbhdu.columns.del_col("notes")
assert tbhdu.columns.names == ["target", "spectrum"]
array = np.rec.array(
[("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4"
)
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
tbhdu.columns.del_col("V_mag")
assert tbhdu.columns.names == ["target"]
array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20")
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target1", format="10A", array=names)
c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes1", format="A10")
c4 = fits.Column(name="spectrum1", format="5E")
c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp("newtable.fits"))
        # Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
columns_info = "[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]"
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 30, "4R x 10C", columns_info, ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
assert hdu.columns.names == [
"target",
"counts",
"notes",
"spectrum",
"flag",
"target1",
"counts1",
"notes1",
"spectrum1",
"flag1",
]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {"a": 2, "b": "b", "c": 2.3}
data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "S1"), ("c", float)],
)
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
header = hdul[1].header
assert header["TNULL1"] == 2
assert header["TNULL2"] == "b"
assert header["TNULL3"] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        Multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
("x", (str, 5)), # 1D column of 5-character strings
("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data["x"] = ["abcde", "xyz"]
data["y"][0] = ["A", "BC", "DEF", "123"]
data["y"][1] = ["X", "YZ", "PQR", "999"]
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp("test.fits"), data)
dx = fits.getdata(self.temp("test.fits"))
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp("test2.fits"))
fx = fits.open(self.temp("test2.fits"))
dx = fx[1].data
fx.close()
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test Table write and read
table.write(self.temp("test3.fits"))
tx = Table.read(self.temp("test3.fits"), character_as_bytes=False)
assert table["x"].dtype == tx["x"].dtype
assert table["y"].dtype == tx["y"].dtype
assert np.all(table["x"] == tx["x"]), f"x: {table['x']} != {tx['x']}"
assert np.all(table["y"] == tx["y"]), f"y: {table['y']} != {tx['y']}"
def test_mask_array(self):
t = fits.open(self.data("table.fits"))
tbdata = t[1].data
mask = tbdata.field("V_mag") > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp("newtable.fits"))
hdul = fits.open(self.temp("newtable.fits"))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
row = t1[1].data[2]
assert row["counts"] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ""
assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all()
row["counts"] = 310
assert row["counts"] == 310
row[1] = 315
assert row["counts"] == 315
assert row[1:4]["counts"] == 315
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
assert row["counts"] == 300
row[1:4][0] = 400
assert row[1:4]["counts"] == 400
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]["counts"] == 500
row[1:4:2][0] = 300
assert row[1:4]["counts"] == 300
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
assert row[1:4].field(0) == 300
assert row[1:4].field("counts") == 300
pytest.raises(KeyError, row[1:4].field, "flag")
row[1:4].setfield("counts", 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, "flag", False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name="target", format="10A")
c2 = fits.Column(name="counts", format="J", unit="DN")
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L")
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = (
"NGC1",
312,
"A Note",
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True,
)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = [
"JIM1",
"33",
"A Note",
np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
True,
]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == "NGC1"
assert tbhdu.columns.columns[2].array[0] == ""
assert (
tbhdu.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == "JIM1"
assert tbhdu.columns.columns[2].array[3] == "A Note"
assert (
tbhdu.columns.columns[3].array[3]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_)
)
and v
)
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.data._coldefs._arrays[0]
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns.columns[0].array
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns._arrays[0]
)
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == "NGC1"
assert tbhdu2.columns.columns[2].array[0] == ""
assert (
tbhdu2.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == "NGC5"
assert tbhdu2.columns.columns[2].array[4] == ""
assert (
tbhdu2.columns.columns[3].array[4]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_)
)
and v
)
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ""
assert tbhdu2.columns.columns[2].array[8] == ""
assert (
tbhdu2.columns.columns[3].array[8]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_)
)
and v
)
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
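# Same reference checks as above, but the new HDU is built from a plain
# ndarray view of an existing table's data rather than from a ColDefs.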
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.data._coldefs._arrays[0]
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns.columns[0].array
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns._arrays[0]
)
# Ensure I can change the value of one data element and it affects
# all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
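# Build a second HDU directly from an existing FITS_rec; updates made
# through the new HDU must not propagate back to the file-backed table
# it was created from, and vice versa.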
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
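# Constructing BinTableHDU(data=...) from another HDU's data should share
# the underlying ndarrays, so a change made through either HDU is visible
# in both.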
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = hducls(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = hducls(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert "EXTVER" not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header["EXTVER"] == 2
# Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header["EXTVER"] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header["EXTVER"] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name="spam", format="E", array=[42.0])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
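# A '2L' column holds two logical (boolean) values per row; check that the
# values survive from_columns both from a ColDefs and from an existing
# FITS_rec.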
c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (
tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (
tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
def test_fits_rec_column_access(self):
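# Column data should be reachable three equivalent ways: attribute access
# (tbdata.V_mag), tbdata.field(...), and dict-style tbdata[...]; check this
# for regular, scaled, ASCII, and variable-length array columns.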
tbdata = fits.getdata(self.data("table.fits"))
assert (tbdata.V_mag == tbdata.field("V_mag")).all()
assert (tbdata.V_mag == tbdata["V_mag"]).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data("tb.fits"))
for col in ("c1", "c2", "c3", "c4"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data("ascii.fits"))
for col in ("a", "b"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(
name="x",
format="PI()",
array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data["x"]) == type(hdu.data.x)
assert (hdu.data["x"][0] == hdu.data.x[0]).all()
assert (hdu.data["x"][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data("zerowidth.fits"))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert "ORBPARM" in tbhdu.columns.names
# The ORBPARM column should not be in the data, though the data should
# be readable
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.writeto(self.temp("newtable.fits"))
hdul.close()
hdul = fits.open(self.temp("newtable.fits"))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert "ORBPARM" in tbhdu.columns.names
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.close()
def test_string_column_padding(self):
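# Binary tables pad string cells with NUL bytes while ASCII tables pad
# with spaces; round-trip through both HDU types and check the raw bytes.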
a = ["img1", "img2", "img3a", "p"]
s = (
"img1\x00\x00\x00\x00\x00\x00"
"img2\x00\x00\x00\x00\x00\x00"
"img3a\x00\x00\x00\x00\x00"
"p\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode("raw-unicode-escape") == s
ahdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace(
"\x00", " "
)
assert (hdul[1].data["MEMNAME"] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[
([0, 1, 2, 3, 4, 5], "row1" * 2),
([6, 7, 8, 9, 0, 1], "row2" * 2),
([2, 3, 4, 5, 6, 7], "row3" * 2),
],
formats="6i4,a8",
)
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits"), mode="update") as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header["TDIM1"] = "(2,3)"
hdul[1].header["TDIM2"] = "(4,2)"
with fits.open(self.temp("newtable.fits")) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (
c1
== np.array(
[
[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]],
]
)
).all()
assert (
c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]])
).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)])
data["x"] = 1, 2, 3
data["s"] = "ok"
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))])
data["x"] = 1, 2, 3
data["s"] = "ok"
del t
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("onedtable.fits"))
with fits.open(self.temp("onedtable.fits")) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header["TDIM1"] == "(1)"
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]]
arr = np.array(
[(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")]
)
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(4,2,3)"
assert tbhdu2.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
assert np.all(tbhdu2.data["S"] == tbhdu.data["S"])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]]
arr2 = [1, 2, 3, 4, 5]
arr = np.array(
[(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")]
)
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp("test.fits"), "wb") as f:
f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)"))
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(2,2,2)"
assert tbhdu2.header["TFORM1"] == "12A"
for row in tbhdu2.data:
assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]])
assert np.all(row["b"] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]]
recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
with fits.open(self.temp("test.fits")) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(["a", "b"], dtype="|S1")
arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2")
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name="str", format="1A", array=arra),
fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb),
fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc),
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data["str"].encode("ascii") == arra).all()
assert (h[1].data["strarray"].encode("ascii") == arrb).all()
assert (h[1].data["intarray"] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [
fits.Column(name="a", format="20I", dim="(2,2)", array=arra),
fits.Column(name="b", format="4I", dim="(2,2)", array=arrb),
]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM1"] == "20I"
assert h[1].header["TFORM2"] == "4I"
assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)"
assert (h[1].data["a"] == arra).all()
assert (h[1].data["b"] == arrb).all()
assert h[1].data.itemsize == 48 # 16-bits times 24
# If the dimensions specify more elements than the repeat count in the
# format specifier, raise an error
pytest.raises(
VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra
)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data("tdim.fits")) as hdulist:
assert hdulist[1].data["V_mag"].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
targets = data.field("target")
s = data[:]
assert (s.field("target") == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field("target") == targets[:n]).all()
s = data[n:]
assert (s.field("target") == targets[n:]).all()
s = data[::2]
assert (s.field("target") == targets[::2]).all()
s = data[::-1]
assert (s.field("target") == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data("table.fits")) as hdu:
data = hdu[1].data
data["V_mag"] = 0
assert np.all(data["V_mag"] == 0)
data["V_mag"] = 1
assert np.all(data["V_mag"] == 1)
for container in (list, tuple, np.array):
data["V_mag"] = container([1, 2, 3])
assert np.array_equal(data["V_mag"], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data("table.fits"), mode="readonly") as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array(
[("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8"
)
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name="c0", format="L", array=a0)
# Format X (bit array) is currently not supported by the dump format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name="c2", format="B", array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name="c3", format="I", array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name="c4", format="J", array=a4)
a5 = np.array(["a", "abc", "ab"])
c5 = fits.Column(name="c5", format="A3", array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name="c6", format="D", array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128)
c7 = fits.Column(name="c7", format="M", array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name="c8", format="PJ()", array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp("data.txt")
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name="names", format="I", array=[1])
c2 = fits.Column(name="formats", format="I", array=[2])
c3 = fits.Column(name="other", format="I", array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ["names", "formats", "other"]
assert t.data.formats == ["I"] * 3
assert (t.data["names"] == [1]).all()
assert (t.data["formats"] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1")
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp("table.fits"))
data = fits.getdata(self.temp("table.fits"), ext=1)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data["a"] == arr["a"]).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column("F1", "L", array=[True, False])
c2 = fits.Column("F2", "L", array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp("table.fits"))
with fits.open(self.temp("table.fits"), mode="update") as hdul:
hdul[1].data["F1"][1] = True
hdul[1].data["F2"][0] = True
with fits.open(self.temp("table.fits")) as hdul:
assert (hdul[1].data["F1"] == [True, True]).all()
assert (hdul[1].data["F2"] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column(
"F1",
"A3",
null="---",
array=np.array(["1.0", "2.0", "---", "3.0"]),
ascii=True,
)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp("test.fits"))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp("test.fits"), mode="update") as h:
h[1].header["TFORM1"] = "E3"
del h[1].header["TNULL1"]
with fits.open(self.temp("test.fits")) as h:
pytest.raises(ValueError, lambda: h[1].data["F1"])
try:
with fits.open(self.temp("test.fits")) as h:
h[1].data["F1"]
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data"
)
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = " "
c1 = fits.Column(
"F1",
format="I8",
null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True,
)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp("ascii_null.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null.fits"), mode="r+") as h:
nulled = h.read().replace("2 ", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null.fits"), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = "NaN"
c2 = fits.Column(
"F1",
format="F12.8",
null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True,
)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp("ascii_null2.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null2.fits"), mode="r+") as h:
nulled = h.read().replace("3.00000000", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("tb.fits")) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["NAXIS"] == 2
assert h[1].header["NAXIS1"] == 12
assert h[1].header["NAXIS2"] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unnecessary_table_load(self):
"""Test that FITS tables are not unnecessarily parsed or processed when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data("table.fits")) as h:
h[1].writeto(self.temp("test.fits"))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert "data" not in h[1].__dict__
with fits.open(self.data("table.fits")) as h1:
with fits.open(self.temp("test.fits")) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data("table.fits"))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data("tb.fits")) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata["c1"] == tbdata2["c1"])
assert np.all(tbdata["c2"] == tbdata2["c2"])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(
tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32)
)
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))])
hdu = fits.BinTableHDU(array)
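# Row width: 8 (i8) + 64 (S64) + 3*2*4 (i4 array) = 96 bytes, hence NAXIS1.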
assert hdu.header["NAXIS1"] == 96
assert hdu.header["NAXIS2"] == 0
assert hdu.header["TDIM3"] == "(2,3)"
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data("random_groups.fits"))["DATA"]
col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E")
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[1].data["TEST"] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data("tb.fits"))
data2 = fits.getdata(self.data("tb.fits"))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1) :] = data2
mask = merged["c1"] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data("tb.fits")))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([("abc",)], dtype=[("a", "S3")])
fits.writeto(self.temp("test.fits"), data)
with fits.open(self.temp("test.fits"), mode="update") as hdul:
hdul[1].data["a"][0] = "XYZ"
assert hdul[1].data["a"][0] == "XYZ"
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].data["a"][0] == "XYZ"
# Test update but with a non-trivial TDIMn
data = np.array(
[([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)],
dtype=[("a", ("S3", (2, 3)))],
)
fits.writeto(self.temp("test2.fits"), data)
expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]]
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data["a"][0, 1, 1] = "XYZ"
assert np.all(hdul[1].data["a"][0] == expected)
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
assert np.all(hdul[1].data["a"][0] == expected)
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting("FITS_rec"):
readfile(self.data("memtest.fits"))
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
@pytest.mark.slow
def test_reference_leak2(self, tmp_path):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_connect import TestMultipleHDU
from .test_core import TestCore
t1 = TestCore()
t1.setup_method()
try:
with _refcounting("FITS_rec"):
t1.test_add_del_columns2()
finally:
t1.teardown_method()
del t1
t2 = self.__class__()
for test_name in [
"test_recarray_to_bintablehdu",
"test_numpy_ndarray_to_bintablehdu",
"test_new_table_from_recarray",
"test_new_fitsrec",
]:
t2.setup_method()
try:
with _refcounting("FITS_rec"):
getattr(t2, test_name)()
finally:
t2.teardown_method()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting("FITS_rec"):
t3.test_read(tmp_path)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data("table.fits")) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
msg = (
r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\."
)
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
# Test that the file wrote out correctly
with fits.open(self.temp("test.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == data)
# Test updating the unsigned int data
hdu.data["A"][0] = 99
hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(
name="c1",
array=np.array([1], dtype=">i2"),
format="1I",
bscale=1,
bzero=32768,
)
S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data["c1"][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data["c1"] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data["c1"][0] = 10
assert X[1].data["c1"][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data["c1"][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4")
i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8")
i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8")
i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2")
t0 = Table([i08, i08 * 2, i10, i20, i02])
t1 = Table.read(self.data("ascii_i4-i20.fits"))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
def test_ascii_floattypes(self):
"""Test different float formats."""
col1 = fits.Column(
name="a", format="D", array=np.array([11.1, 12.2]), ascii=True
)
col2 = fits.Column(
name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True
)
col3 = fits.Column(
name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True
)
hdu = fits.TableHDU.from_columns([col1, col2, col3])
hdu.writeto(self.temp("foo.fits"))
with fits.open(self.temp("foo.fits"), memmap=False) as hdul:
assert comparerecords(hdul[1].data, hdu.data)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert (
len(objgraph.by_type(type_)) <= refcount
), f"More {type_!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as toto:
q = toto[1].data.field("QUAL_SPE")
assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith("J(1571)")
for code in ("PJ()", "QJ()"):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name="TESTVLF", format=format_code, array=arr)
col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data["TESTSCA"]) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data["TESTVLF"]) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all()
assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all()
assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all()
assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all()
for code in ("PJ()", "QJ()"):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array(
[np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array(
[np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ["a", "ab", "abc"]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
data = fits.getdata(self.temp("toto.fits"))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data["QUAL_SPE"], col.array):
assert (row_a == row_b).all()
for code in ("PJ()", "QJ()"):
test(code)
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == "win32",
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column("test", format="J", array=np.arange(255))
c1 = fits.Column("A", format="PJ", array=arr1)
c2 = fits.Column("B", format="PJ", array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp("test.fits"), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM2"] == "PJ(255)"
assert h[2].header["TFORM2"] == "PJ(255)"
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp("test.fits")) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp("test2.fits"))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp("test2.fits"), mode="append") as new_hdul:
for _ in range(2):
with fits.open(self.temp("test.fits")) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp("test2.fits")) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data("theap-gap.fits"))
data = hdul[1].data
assert data.shape == (500,)
assert data["i"][497] == 497
assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name="var",
format="PI()",
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data("variable_length_table.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]]
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_P_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10812
Check that an error is raised when the heap size exceeds what can be
indexed with a 32-bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
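# Size rationale (sketch): (2**28 + 1) float64 elements put the heap just over
# 2 GiB, which exceeds the 2**31 - 1 byte offset range of 32-bit 'P' descriptors,
# hence the expected ValueError suggesting the 'Q' format.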
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
with pytest.raises(
ValueError, match="Please consider using the 'Q' format for your file."
):
t.writeto(self.temp("matrix.fits"))
def test_empty_vla_raw_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/12881
Check that empty VLAs are read back correctly.
"""
columns = [
fits.Column(name="integer", format="B", array=(1, 2)),
fits.Column(name="empty", format="PJ", array=([], [])),
]
fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits"))
with fits.open(self.temp("bug.fits")) as hdu:
# We can't compare the whole array since the _VLF is an array of
# objects, hence we compare elementwise
for i in range(len(hdu[1].data["empty"])):
assert np.array_equal(
hdu[1].data["empty"][i], np.array([], dtype=np.int32)
)
def test_multidim_VLA_tables(self):
"""
Check that multidimensional VLAs are correctly written and read.
See https://github.com/astropy/astropy/issues/12860
and https://github.com/astropy/astropy/issues/7810
"""
a = np.arange(5)
b = np.arange(7)
array = np.array([a, b], dtype=object)
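# Note: TDIMn/dim is given in FITS axis order (fastest-varying axis first), the
# reverse of the numpy shape, so dim="(7,1)" corresponds to a numpy shape of (1, 7).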
col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(7)"]
assert np.array_equal(
hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0, 3.0, 4.0]])
)
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
)
a = np.arange(10).reshape((5, 2))
b = np.arange(14).reshape((7, 2))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(14)", dim="(2,7)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(14)"]
assert np.array_equal(
hdus[1].data["test"][0],
np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]),
)
assert np.array_equal(
hdus[1].data["test"][1],
np.array(
[
[0.0, 1.0],
[2.0, 3.0],
[4.0, 5.0],
[6.0, 7.0],
[8.0, 9.0],
[10.0, 11.0],
[12.0, 13.0],
]
),
)
a = np.arange(3).reshape((1, 3))
b = np.arange(6).reshape((2, 3))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(6)", dim="(3,2)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(6)"]
assert np.array_equal(hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0]]))
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
)
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as they are unambiguous ("unambiguous" is a loose criterion here,
since Numpy is case insensitive when parsing the format codes, but their
"proper" case is lower-case, so we accept that). In practice, any key in
the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column("TEST", np.dtype(recformat))
c.format == fitsformat
c = fits.Column("TEST", recformat)
c.format == fitsformat
c = fits.Column("TEST", fitsformat)
c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b" 1. 2. 3."
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
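# Unsigned integer columns are stored as signed integers offset by
# bzero = 2**(nbits - 1); that offset is what these checks verify.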
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(
UserWarning,
match=r"Field 2 has a repeat count of 0 in its format code",
):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to the ``TBCOL`` keyword.
Also test that the VerifyError message generated is the one with the
highest priority, i.e. that the ordering of error messages is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
from astropy.table import Table
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__, so compare the elements individually.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will be ignored in favor of the unit attributes on the columns
hdr["TUNIT1"] = "pixel"
hdr["TUNIT2"] = "m"
hdr["TUNIT3"] = "m"
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr["TCTYP2"] = "RA---TAN"
hdr["TCTYP3"] = "ANGLE"
hdr["TCRVL2"] = -999.0
hdr["TCRVL3"] = -999.0
hdr["TCRPX2"] = 1.0
hdr["TCRPX3"] = 1.0
hdr["TALEN2"] = 16384
hdr["TALEN3"] = 1024
hdr["TCUNI2"] = "angstrom"
hdr["TCUNI3"] = "deg"
# Other non-relevant keywords
hdr["RA"] = 1.5
hdr["DEC"] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special"
)
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == "s"
assert hdu.columns[1].unit == "pixel"
assert hdu.columns[2].unit is None
assert hdu.header["TUNIT1"] == "s"
assert hdu.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert "TCTYP1" not in hdu.header
assert hdu.header["TCTYP2"] == "RA---TAN"
assert hdu.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu.header["RA"] == 1.5
assert hdu.header["DEC"] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmp_path / "test.fits"
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == "s"
assert hdu2.columns[1].unit == "pixel"
assert hdu2.columns[2].unit is None
assert hdu2.header["TUNIT1"] == "s"
assert hdu2.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == "RA---TAN"
assert hdu2.columns[2].coord_type == "ANGLE"
assert "TCTYP1" not in hdu2.header
assert hdu2.header["TCTYP2"] == "RA---TAN"
assert hdu2.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu2.header["RA"] == 1.5
assert hdu2.header["DEC"] == 3.0
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
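# 'A3DTABLE' is a legacy name for binary table extensions; verify('fix')
# should convert the XTENSION keyword to the standard 'BINTABLE'.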
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import warnings
from io import BytesIO, StringIO
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([("a", 1), ("b", 1)])
copied_header = copy.copy(original_header)
# Modifying the original header should not alter the copy
original_header["c"] = 100
assert "c" not in copied_header
# and changing the copy should not change the original.
copied_header["a"] = 0
assert original_header["a"] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([("a", 10)])
new_header = fits.Header(original_header, copy=True)
original_header["a"] = 20
assert new_header["a"] == 10
new_header["a"] = 0
assert original_header["a"] == 20
def test_init_with_dict():
dict1 = {"a": 11, "b": 12, "c": 13, "d": 14, "e": 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
# Create a list of tuples, each consisting of a letter and its index number
list1 = [(i, j) for j, i in enumerate("abcdefghijklmnopqrstuvwxyz")]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
# Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
header.rename_keyword("A", "B")
assert "A" not in header
assert "B" in header
assert header[0] == "B"
assert header["B"] == "B"
assert header.comments["B"] == "C"
@pytest.mark.parametrize("key", ["A", "a"])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
assert key in header
assert header[key] == "B"
assert header.get(key) == "B"
assert header.index(key) == 0
assert header.comments[key] == "C"
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert c.keyword == ""
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == "ABC"
assert c.value == "abc"
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card("abc", "<8 ch")
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card("nullstr", "")
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring("ABC = F")
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card("long_int", -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card("floatnum", -467374636747637647347374734737437.0)
if str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad(
"FLOATNUM= -4.6737463674763E+032"
):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card("abc", (1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card("abc", 9, "abcde" * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (
str(c) == "ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab"
)
c = fits.Card("abc", "a" * 68, "abcdefg")
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ("abc",), {"value": (2, 3)})
pytest.raises(ValueError, fits.Card, "key", [], "comment")
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, "abcdefghi", "long")
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card("abc+", 9)
assert len(w) == 1
assert c.image == _pad("HIERARCH abc+ = 9")
def test_add_history(self):
header = fits.Header(
[
("A", "B", "C"),
("HISTORY", 1),
("HISTORY", 2),
("HISTORY", 3),
("", "", ""),
("", "", ""),
]
)
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header["HISTORY"] == [1, 2, 3, 4]
assert repr(header["HISTORY"]) == "1\n2\n3\n4"
header.add_history(0, after="A")
assert len(header) == 6
assert header.cards[1].value == 0
assert header["HISTORY"] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header(
[("A", "B", "C"), ("", 1), ("", 2), ("", 3), ("", "", ""), ("", "", "")]
)
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is somewhat questionable--there's a
# distinction between truly blank cards and cards with blank keywords
# that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[""] == [1, 2, 3, "", "", 4]
assert repr(header[""]) == "1\n2\n3\n\n\n4"
header.add_blank(0, after="A")
assert len(header) == 8
assert header.cards[1].value == 0
assert header[""] == [0, 1, 2, 3, "", "", 4]
header[""] = 5
header[" "] = 6
assert header[""] == [0, 1, 2, 3, "", "", 4, 5, 6]
assert header[" "] == [0, 1, 2, 3, "", "", 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
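# FakeHeader exposes just enough of a mapping interface (keys() plus item
# access returning value/comment tuples) for Header.update() to treat it
# like a dict-like object.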
header = fits.Header()
header.update({"FOO": ("BAR", "BAZ")})
header.update(FakeHeader([("A", 1), ("B", 2, "comment")]))
assert set(header.keys()) == {"FOO", "A", "B"}
assert header.comments["B"] == "comment"
# test that comments are preserved
tmphdr = fits.Header()
tmphdr["HELLO"] = (1, "this is a comment")
header.update(tmphdr)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO"}
assert header.comments["HELLO"] == "this is a comment"
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO", "NAXIS1", "NAXIS2"}
assert set(header.values()) == {"BAR", 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data("arange.fits"))
hdul[0].header.update({"FOO": ("BAR", "BAZ")})
assert hdul[0].header["FOO"] == "BAR"
assert hdul[0].header.comments["FOO"] == "BAZ"
with pytest.raises(ValueError):
hdul[0].header.update({"FOO2": ("BAR", "BAZ", "EXTRA")})
hdul.writeto(self.temp("test.fits"))
hdul.close()
hdul = fits.open(self.temp("test.fits"), mode="update")
hdul[0].header.comments["FOO"] = "QUX"
hdul.close()
hdul = fits.open(self.temp("test.fits"))
assert hdul[0].header.comments["FOO"] == "QUX"
hdul[0].header.add_comment(0, after="FOO")
assert str(hdul[0].header.cards[-1]).strip() == "COMMENT 0"
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad("HISTORY " + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad("COMMENT " + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value."
)
assert (
c.value == "card has no comments. "
"/ text after slash is still part of the value."
)
assert c.comment == ""
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card("", " / EXPOSURE INFORMATION")
assert str(c) == _pad(" / EXPOSURE INFORMATION")
c = fits.Card.fromstring(str(c))
assert c.keyword == ""
assert c.value == " / EXPOSURE INFORMATION"
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring("ABC = (8, 9)")
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring("abc = + 2.1 e + 12")
assert c.value == 2100000000000.0
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
# fixable non-FSC: if the card is not parsable, its value will be assumed
# to be a string and everything after the first slash will be the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes / let's also try the comment"
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment "
)
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring("ABC = ")
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header["FOO"] = "BAR"
header["UNDEF"] = None
assert list(header.values()) == ["BAR", None]
assert list(header.items()) == [("FOO", "BAR"), ("UNDEF", None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring("XYZ= 100")
assert c.keyword == "XYZ"
assert c.value == 100
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = "Card 'ABC' is not FITS standard (equal sign not at column 8)"
err_text2 = "Card 'ABC' is not FITS standard (invalid value string: 'a6'"
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify("fix")
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card(
"WHATEVER",
"SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_"
"03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY"
"_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml",
)
assert (
str(c)
== "WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' "
)
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1["TEST"] = "abcdefg" * 30
h2 = fits.Header()
h2["TEST"] = "abcdefg" * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header["TEST1"] = ("Regular value", "Regular comment")
header["TEST2"] = ("long string value " * 10, "long comment " * 10)
header["TEST3"] = ("Regular value", "Regular comment")
assert repr(header).splitlines() == [
str(fits.Card("TEST1", "Regular value", "Regular comment")),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card("TEST3", "Regular value", "Regular comment")),
]
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = "long string value " * 10
header = fits.Header()
header[""] = value
assert len(header) == 3
assert " ".join(header[""]) == value.rstrip()
# Ensure that this works like other commentary keywords
header["COMMENT"] = value
header["HISTORY"] = value
assert header["COMMENT"] == header["HISTORY"]
assert header["COMMENT"] == header[""]
def test_long_string_from_file(self):
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
c = hdul[0].header.cards["abc"]
hdul.close()
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card("abc", "longstringvalue" * 10, "longcomment" * 10)
assert (
str(c)
== "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment "
)
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' / comments in line 1")
+ _pad(
"continue 'continue with long string but without the "
"ampersand at the end' /"
)
+ _pad(
"continue 'continue must have string value (with quotes)' "
"/ comments with ''. "
)
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c)
== "ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. "
)
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad(
"EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'"
)
+ _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'")
+ _pad("CONTINUE '&' / pysyn expression")
)
assert c.keyword == "EXPR"
assert (
c.value == "/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits "
"* 5.87359e-12 * MWAvg(Av=0.12)"
)
assert c.comment == "pysyn expression"
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h["SVALUE"] = "A" * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card("TEST", "long value" * 10, "long comment &" * 10)
assert (
str(c)
== "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & "
)
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(
AstropyUserWarning, match="HIERARCH card will be created"
) as w:
c = fits.Card(
"ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert len(w) == 1
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
# Test manual creation of hierarch card
c = fits.Card("hierarch abcdefghi", 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card(
"HIERARCH ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings."""
filename = fits.util.get_testdata_filepath("compressed_image.fits")
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring("HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
# Test also with creation via the Card constructor
c = fits.Card("HIERARCH key.META_4", "calFileVersion")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card("HIERARCH WeirdCard.~!@#_^$%&", "The value", "a comment")
# This should not raise any exceptions
c.verify("exception")
assert c.keyword == "WeirdCard.~!@#_^$%&"
assert c.value == "The value"
assert c.comment == "a comment"
# Test also the specific case from the original bug report
header = fits.Header(
[
("simple", True),
("BITPIX", 8),
("NAXIS", 0),
("EXTEND", True, "May contain datasets"),
("HIERARCH key.META_0", "detRow"),
]
)
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
header2 = hdul[0].header
assert str(header.cards[header.index("key.META_0")]) == str(
header2.cards[header2.index("key.META_0")]
)
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], "NAXIS")
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header["NAXIS"]
def test_hierarch_card_lookup(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
assert "abcdefghi" in header
assert header["abcdefghi"] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert "ABCDEFGHI" in header
def test_hierarch_card_delete(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
del header["hierarch abcdefghi"]
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["abcdefghi"] = 10
header["abcdefgh"] = 10
header["abcdefg"] = 10
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header["abcdefghij"]
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header[2]
assert list(header.keys())[2] == "abcdefg".upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLAH BLAH": "TESTA"})
assert len(w) == 0
assert "BLAH BLAH" in header
assert header["BLAH BLAH"] == "TESTA"
header.update({"HIERARCH BLAH BLAH": "TESTB"})
assert len(w) == 0
assert header["BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH": "TESTC"})
assert len(w) == 1
assert len(header) == 1
assert header["BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["blah blah"], "TESTD"
header.update({"blah blah": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["blah blah"], "TESTE"
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({"BLAH BLAH BLAH": "TESTA"})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({"HIERARCH BLAH BLAH BLAH": "TESTB"})
assert len(w) == 3
assert header["BLAH BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH BLAH": "TESTC"})
assert len(w) == 4
assert header["BLAH BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah blah": "TESTD"})
assert len(w) == 4
assert header["blah blah blah"], "TESTD"
header.update({"blah blah blah": "TESTE"})
assert len(w) == 5
assert header["blah blah blah"], "TESTE"
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLA BLA": "TESTA"})
assert len(w) == 0
assert "BLA BLA" in header
assert header["BLA BLA"] == "TESTA"
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 0
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 1
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTE"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({"BLA BLA": "TESTA"})
assert len(w) == 1
assert msg in str(w[0].message)
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 1
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 2
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 3
assert len(header) == 1
assert header["bla bla"], "TESTE"
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header["FOO"] = ("bar", "baz", "qux")
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header["FOO"] = ("BAR",)
header["FOO2"] = (None,)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == ""
assert header.comments["FOO"] == ""
def test_header_setitem_2tuple(self):
header = fits.Header()
header["FOO"] = ("BAR", "BAZ")
header["FOO2"] = (None, None)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == "BAZ"
assert header.comments["FOO"] == "BAZ"
assert header.comments["FOO2"] == ""
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header["FOO"] = "BAR"
assert header["FOO"] == "BAR"
header["FOO"] = None
assert header["FOO"] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep="\n")
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header["UNDEF3"] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header["DEFINED"] == 42
assert header["UNDEF"] is None
assert header["UNDEF2"] is None
assert header["UNDEF3"] is None
assert header["UNDEF5"] is None
assert header["UNDEF6"] is None
# Assign an undefined value to a new card
header["UNDEF4"] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All cards should now have undefined values
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([("A", "B", "C")])
header.set("A", comment="D")
assert header["A"] == "B"
assert header.comments["A"] == "D"
def test_header_iter(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header) == ["A", "C"]
def test_header_slice(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
newheader = header[1:]
assert len(newheader) == 2
assert "A" not in newheader
assert "C" in newheader
assert "E" in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == "F"
assert newheader[1] == "D"
assert newheader[2] == "B"
newheader = header[::2]
assert len(newheader) == 2
assert "A" in newheader
assert "C" not in newheader
assert "E" in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = "GH"
assert header[1] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header[1:] = ["H", "I"]
assert header[1] == "H"
assert header[2] == "I"
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
del header[1:]
assert len(header) == 1
assert header[0] == "B"
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
newheader = header["AB*"]
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([("DATE", 1), ("DATE-OBS", 2), ("DATE-FOO", 3)])
assert len(header["DATE*"]) == 3
assert len(header["DATE?*"]) == 2
assert len(header["DATE-*"]) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header["AB*"] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header["AB*"] = "GH"
assert header[0] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header["AB*"] = ["H", "I"]
assert header[0] == "H"
assert header[2] == "I"
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
del header["AB*"]
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header(
[
("ABC", 0),
("HISTORY", 1),
("HISTORY", 2),
("DEF", 3),
("HISTORY", 4),
("HISTORY", 5),
]
)
assert header["HISTORY"] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([("A", "B"), ("C", "D")])
header.clear()
assert "A" not in header
assert "C" not in header
assert len(header) == 0
@pytest.mark.parametrize("fitsext", [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header["FOO"] = "BAR"
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp("temp.fits"), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(["A", "B"])
assert "A" in header
assert header["A"] is None
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] is None
assert header.comments["B"] == ""
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(["A", "B"], "C")
assert "A" in header
assert header["A"] == "C"
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] == "C"
assert header.comments["B"] == ""
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(["A"], ("B", "C"))
assert "A" in header
assert header["A"] == "B"
assert header.comments["A"] == "C"
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(["A", "B", "A"], "C")
assert "A" in header
assert ("A", 0) in header
assert ("A", 1) in header
assert ("A", 2) not in header
assert header[0] == "C"
assert header["A"] == "C"
assert header[("A", 0)] == "C"
assert header[2] == "C"
assert header[("A", 1)] == "C"
def test_header_items(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header.items()) == [("A", "B"), ("C", "D")]
def test_header_iterkeys(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.values(), ["B", "D"]):
assert a == b
def test_header_keys(self):
with fits.open(self.data("arange.fits")) as hdul:
assert list(hdul[0].header) == [
"SIMPLE",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"NAXIS3",
"EXTEND",
]
def test_header_list_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
last = header.pop()
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop(1)
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop(0)
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
pytest.raises(TypeError, header.pop, "A", "B", "C")
last = header.pop("G")
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop("C")
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop("A")
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
default = header.pop("X", "Y")
assert default == "Y"
assert len(header) == 1
pytest.raises(KeyError, header.pop, "X")
def test_popitem(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.setdefault("A") == "B"
assert header.setdefault("C") == "D"
assert header.setdefault("E") == "F"
assert len(header) == 3
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
assert "G" in header
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update({"A": "E", "F": "G"})
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([("A", "B"), ("C", "D")])
header.update(A="E", F="G")
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update([("A", "E"), fits.Card("F", "G")])
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header["MYKEY"] = ("some val", "some comment")
hdu.header += hdu2.header
assert len(hdu.header) == 5
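# A freshly created PrimaryHDU header holds only the four required cards
# SIMPLE, BITPIX, NAXIS and EXTEND; the ImageHDU's structural cards are
# stripped on extend, so adding MYKEY brings the total to 5.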
assert hdu.header[-1] == "some val"
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == "XTENSION"
assert hdu.header[-1] == "some val"
assert ("MYKEY", 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == "some val"
assert hdu.header[-1] == "some other val"
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu2.header["HISTORY"] = "history 1"
hdu2.header["HISTORY"] = "history 2"
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) in hdu.header
assert hdu.header[("MYKEY", 1)] == "some other val"
assert len(hdu.header["HISTORY"]) == 3
assert hdu.header[-1] == "history 2"
hdu = fits.PrimaryHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) not in hdu.header
assert hdu.header["MYKEY"] == "some other val"
assert len(hdu.header["HISTORY"]) == 2
assert hdu.header[-1] == "history 2"
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data("test0.fits"))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.count("A") == 1
assert header.count("C") == 1
assert header.count("E") == 1
header["HISTORY"] = "a"
header["HISTORY"] = "b"
assert header.count("HISTORY") == 2
pytest.raises(KeyError, header.count, "G")
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ""
assert header[-2] == ""
# New card should fill the first blank by default
header.append(("E", "F"))
assert len(header) == 4
assert header[-2] == "F"
assert header[-1] == ""
# This card should not use up a blank spot
header.append(("G", "H"), useblanks=False)
assert len(header) == 5
assert header[-1] == ""
assert header[-2] == "H"
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.append("E")
assert len(header) == 3
assert list(header)[-1] == "E"
assert header[-1] is None
assert header.comments["E"] == ""
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append("")
assert len(header) == 4
assert list(header)[-1] == ""
assert header[""] == ""
assert header.comments[""] == ""
def test_header_insert_use_blanks(self):
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ("E", "F"))
assert len(header) == 4
assert header[1] == "F"
assert header[-1] == ""
assert header[-2] == "D"
# Insert a new card without using blanks
header.insert(1, ("G", "H"), useblanks=False)
assert len(header) == 5
assert header[1] == "H"
assert header[-1] == ""
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header(
[("NAXIS1", 10), ("COMMENT", "Comment 1"), ("COMMENT", "Comment 3")]
)
header.insert("NAXIS1", ("NAXIS", 2, "Number of axes"))
assert list(header.keys())[0] == "NAXIS"
assert header[0] == 2
assert header.comments[0] == "Number of axes"
header.insert("NAXIS1", ("NAXIS2", 20), after=True)
assert list(header.keys())[1] == "NAXIS1"
assert list(header.keys())[2] == "NAXIS2"
assert header[2] == 20
header.insert(("COMMENT", 1), ("COMMENT", "Comment 2"))
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3"]
header.insert(("COMMENT", 2), ("COMMENT", "Comment 4"), after=True)
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3", "Comment 4"]
header.insert(-1, ("TEST1", True))
assert list(header.keys())[-2] == "TEST1"
header.insert(-1, ("TEST2", True), after=True)
assert list(header.keys())[-1] == "TEST2"
assert list(header.keys())[-3] == "TEST1"
def test_remove(self):
header = fits.Header([("A", "B"), ("C", "D")])
# When keyword is present in the header it should be removed.
header.remove("C")
assert len(header) == 1
assert list(header) == ["A"]
assert "C" not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove("F")
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove("F", ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([("A", "B"), ("C", "D"), ("A", "F")])
header.remove("A", remove_all=True)
assert "A" not in header
assert len(header) == 1
assert list(header) == ["C"]
assert header[0] == "D"
def test_header_comments(self):
header = fits.Header([("A", "B", "C"), ("DEF", "G", "H")])
assert repr(header.comments) == " A C\n DEF H"
def test_comment_slices_and_filters(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
s = header.comments[1:]
assert list(s) == ["H", "K"]
s = header.comments[::-1]
assert list(s) == ["K", "H", "D"]
s = header.comments["A*"]
assert list(s) == ["D", "K"]
def test_comment_slice_filter_assign(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
header.comments[1:] = "L"
assert list(header.comments) == ["D", "L", "L"]
assert header.cards[header.index("AB")].comment == "D"
assert header.cards[header.index("EF")].comment == "L"
assert header.cards[header.index("AI")].comment == "L"
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ["L", "L", "D"]
header.comments["A*"] = ["M", "N"]
assert list(header.comments) == ["M", "L", "N"]
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header["HISTORY"] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header["HISTORY"][1:] == indices[1:]
assert header["HISTORY"][:3] == indices[:3]
assert header["HISTORY"][:6] == indices[:6]
assert header["HISTORY"][:-2] == indices[:-2]
assert header["HISTORY"][::-1] == indices[::-1]
assert header["HISTORY"][1::-1] == indices[1::-1]
assert header["HISTORY"][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ("A", "B", "C"))
header.append(("D", "E", "F"), end=True)
assert list(header["HISTORY"][1:]) == indices[1:]
assert list(header["HISTORY"][:3]) == indices[:3]
assert list(header["HISTORY"][:6]) == indices[:6]
assert list(header["HISTORY"][:-2]) == indices[:-2]
assert list(header["HISTORY"][::-1]) == indices[::-1]
assert list(header["HISTORY"][1::-1]) == indices[1::-1]
assert list(header["HISTORY"][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header["FOO"] = "BAR"
header["HISTORY"] = "ABC"
header["FRED"] = "BARNEY"
header["HISTORY"] = "DEF"
header["HISTORY"] = "GHI"
assert header["HISTORY"] == ["ABC", "DEF", "GHI"]
# Single value update
header["HISTORY"][0] = "FOO"
assert header["HISTORY"] == ["FOO", "DEF", "GHI"]
# Single value partial slice update
header["HISTORY"][1:] = "BAR"
assert header["HISTORY"] == ["FOO", "BAR", "BAR"]
# Multi-value update
header["HISTORY"][:] = ["BAZ", "QUX"]
assert header["HISTORY"] == ["BAZ", "QUX", "BAR"]
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header["HISTORY"] = "hello world"
header["HISTORY"] = "hello world"
header["COMMENT"] = "hello world"
assert header["HISTORY"] != header["COMMENT"]
header["COMMENT"] = "hello world"
assert header["HISTORY"] == header["COMMENT"]
def test_long_commentary_card(self):
header = fits.Header()
header["FOO"] = "BAR"
header["BAZ"] = "QUX"
longval = "ABC" * 30
header["HISTORY"] = longval
header["FRED"] = "BARNEY"
header["HISTORY"] = longval
assert len(header) == 7
assert list(header)[2] == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
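# longval is "ABC" * 30, i.e. 90 characters; a commentary card holds at most
# 72 characters of text after the 8-character keyword field, so the value
# wraps onto two HISTORY cards (72 + 18 characters), as checked above.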
header.set("HISTORY", longval, after="FOO")
assert len(header) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
header = fits.Header()
header.update({"FOO": "BAR"})
header.update({"BAZ": "QUX"})
longval = "ABC" * 30
header.add_history(longval)
header.update({"FRED": "BARNEY"})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.add_history(longval, after="FOO")
assert len(header.cards) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
def test_totxtfile(self, home_is_temp):
header_filename = self.temp("header.txt")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.totextfile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.totextfile(header_filename, overwrite=False)
hdul[0].header.totextfile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_tofile(self, home_is_temp):
"""
Repeat test_totxtfile, but with tofile()
"""
header_filename = self.temp("header.fits")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.tofile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.tofile(header_filename, overwrite=False)
hdul[0].header.tofile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711"""
filename = self.data("scale.fits")
hdr = fits.Header.fromfile(filename)
assert hdr["DATASET"] == "2MASS"
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header["A"] = ("B", "C")
header["B"] = ("C", "D")
header["C"] = ("D", "E")
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
f.write("\nEND")
new_header = fits.Header.fromtextfile(self.temp("test.hdr"))
assert "END" not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, "END", "")
pytest.raises(ValueError, header.append, "END")
pytest.raises(ValueError, header.append, "END", end=True)
pytest.raises(ValueError, header.insert, len(header), "END")
pytest.raises(ValueError, header.set, "END")
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep="", endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += " " * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header("END =", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header("END = ", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header("END$%&%^*%*", True)
with pytest.warns(
AstropyUserWarning,
match=r"Unexpected bytes trailing END keyword: '\$%&%\^\*%\*'",
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header("END", False)
with pytest.warns(
AstropyUserWarning, match="Missing padding to end of the FITS block"
) as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h["FOO"] = "BAR"
h["COMMENT"] = "hello"
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
out = f.read()
out = out.replace(b"hello", "héllo".encode("latin1"))
out = out.replace(b"BAR", "BÀR".encode("latin1"))
with open(self.temp("test2.fits"), "wb") as f2:
f2.write(out)
with pytest.warns(
AstropyUserWarning,
match="non-ASCII characters are present in the FITS file",
) as w:
h = fits.getheader(self.temp("test2.fits"))
assert h["FOO"] == "B?R"
assert h["COMMENT"] == "h?llo"
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([("A", "B"), ("B", "C"), ("C", "D")])
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after=0)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before="C")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after="A")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set("C", before=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("C", after=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep="\n")
# First the case that *does* work prior to fixing this issue
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep="\n")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h["FOCALLEN"] = 155.0
h["APERTURE"] = 0.0
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header["TEST"] = 5.0022221e-07
hdu.writeto(self.temp("test.fits"))
# Here we manually make the file invalid
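# (Offset 346 lands on the 'E' of the right-justified value string
# '5.0022221E-07': TEST is the fifth card of the primary header, so the
# card starts at byte 4 * 80 = 320 and the 'E' sits 26 bytes into it.)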
with open(self.temp("test.fits"), "rb+") as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii("e"))
with fits.open(self.temp("test.fits")) as hdul, pytest.warns(
AstropyUserWarning
) as w:
hdul.writeto(self.temp("temp.fits"), output_verify="warn")
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
Python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad("FOO = T")
barimg = _pad("BAR = F")
h = fits.Header()
h["FOO"] = True
h["BAR"] = False
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h["FOO"] = np.bool_(True)
h["BAR"] = np.bool_(False)
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([("abC", 1), ("Def", 2), ("GeH", 3)])
assert list(h) == ["ABC", "DEF", "GEH"]
assert "abc" in h
assert "dEf" in h
assert h["geh"] == 3
# Case insensitivity of wildcards
assert len(h["g*"]) == 1
h["aBc"] = 2
assert h["abc"] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h["gEh"]
assert list(h) == ["ABC", "DEF"]
assert len(h) == 2
assert h.get("def") == 2
h.set("Abc", 3)
assert h["ABC"] == 3
h.set("gEh", 3, before="Abc")
assert list(h) == ["GEH", "ABC", "DEF"]
assert h.pop("abC") == 3
assert len(h) == 2
assert h.setdefault("def", 3) == 2
assert len(h) == 2
assert h.setdefault("aBc", 1) == 1
assert len(h) == 3
assert list(h) == ["GEH", "DEF", "ABC"]
h.update({"GeH": 1, "iJk": 4})
assert len(h) == 4
assert list(h) == ["GEH", "DEF", "ABC", "IJK"]
assert h["GEH"] == 1
assert h.count("ijk") == 1
assert h.index("ijk") == 3
h.remove("Def")
assert len(h) == 3
assert list(h) == ["GEH", "ABC", "IJK"]
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header["TESTKW"] = ("Test val", "This is the END")
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header fills one full 2880-byte block (36 cards);
# the END card added on write then extends it to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp("test.hdr"))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = "\u30a8\u30ea\u30c3\u30af"
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h["FOO"] = "BAR"
assert "FOO" in h
assert h["FOO"] == "BAR"
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, "BAR")
h["FOO"] = "BAZ"
assert h["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, "FOO", erikku)
h["FOO"] = ("BAR", "BAZ")
assert h["FOO"] == "BAR"
assert h.comments["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, "FOO", ("BAR", erikku))
pytest.raises(ValueError, assign, "FOO", (erikku, "BAZ"))
pytest.raises(ValueError, assign, "FOO", (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
While test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, it should not be
possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set("TEST", b"Hello")
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h["FOO"] = "Bar "
assert h["FOO"] == "Bar"
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp("strip_header_whitespace", False):
assert h["FOO"] == "Bar "
assert h["QUX"] == "Bar "
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
assert h["FOO"] == "Bar"
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = [
"CCD parameters table ...",
" reference table oref$n951041ko_ccd.fits",
" INFLIGHT 12/07/2001 25/02/2002",
" all bias frames",
] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header["HISTORY"] = item
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header["HISTORY"] == hdu.header["HISTORY"]
new_hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring("CLFIND2D: contour = 0.30")
c2 = fits.Card.fromstring("Just some random text.")
c3 = fits.Card.fromstring("A" * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert "CLFIND2D" in header
assert "Just som" in header
assert "AAAAAAAA" in header
assert header["CLFIND2D"] == ": contour = 0.30"
assert header["Just som"] == "e random text."
assert header["AAAAAAAA"] == "A" * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, "CLFIND2D", "foo")
pytest.raises(ValueError, header.set, "Just som", "foo")
pytest.raises(ValueError, header.set, "AAAAAAAA", "foo")
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring("HIERARCH ESO DET CHIP PXSPACE = 5e6")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
c.verify("fix")
assert str(c) == _pad("HIERARCH ESO DET CHIP PXSPACE = 5E6")
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, "TEST", float("nan"))
pytest.raises(ValueError, h.set, "TEST", np.nan)
pytest.raises(ValueError, h.set, "TEST", np.float32("nan"))
pytest.raises(ValueError, h.set, "TEST", float("inf"))
pytest.raises(ValueError, h.set, "TEST", np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
could not be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([("TEST", True)])
h["TEST"] = 1
assert h["TEST"] is not True
assert isinstance(h["TEST"], int)
assert h["TEST"] == 1
h["TEST"] = np.bool_(True)
assert h["TEST"] is True
h["TEST"] = False
assert h["TEST"] is False
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
h["TEST"] = 0
assert h["TEST"] is not False
assert isinstance(h["TEST"], int)
assert h["TEST"] == 0
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h["TEST"] = 1
# int -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# int -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# Now the same tests but with zeros
h["TEST"] = 0
# int -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
# int -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, "HISTORY", "\n")
pytest.raises(ValueError, h.set, "HISTORY", "\nabc")
pytest.raises(ValueError, h.set, "HISTORY", "abc\n")
pytest.raises(ValueError, h.set, "HISTORY", "abc\ndef")
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if "\n" in card_image:
pytest.raises(fits.VerifyError, c.verify, "exception")
else:
c.verify("exception")
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
header with Header.append (as opposed to assigning to hdr['HISTORY']),
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = "abc" * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(("history", value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == "HISTORY" and val == value
# Try adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data("test0.fits"), "rb") as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data("test0.fits"))
assert pri_hdr["NAXIS"] == pri_hdr_from_bytes["NAXIS"]
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr["KEY2 "] = 2
hdr["KEY2 "] = 4
assert len(hdr) == 1
assert hdr["KEY2"] == 4
assert hdr["KEY2 "] == 4
def test_strip(self):
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr.strip()
assert set(hdr) == {"HISTORY", "FOO"}
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr = hdr.copy(strip=True)
assert set(hdr) == {"HISTORY", "FOO"}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring("KW = INF / Comment")
card.value = "FIXED"
assert tuple(card) == ("KW", "FIXED", "Comment")
card.verify("fix")
assert tuple(card) == ("KW", "FIXED", "Comment")
card = fits.Card.fromstring("KW = INF")
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp("bogus.fits"))
with fits.open(self.temp("bogus.fits")) as hdul:
hdul[0].header["KW"] = -1
hdul.writeto(self.temp("bogus_fixed.fits"))
with fits.open(self.temp("bogus_fixed.fits")) as hdul:
assert hdul[0].header["KW"] == -1
def test_index_numpy_int(self):
header = fits.Header([("A", "FOO"), ("B", 2), ("C", "BAR")])
idx = np.int8(2)
assert header[idx] == "BAR"
header[idx] = "BAZ"
assert header[idx] == "BAZ"
header.insert(idx, ("D", 42))
assert header[idx] == 42
header.add_comment("HELLO")
header.add_comment("WORLD")
assert header["COMMENT"][np.int64(1)] == "WORLD"
header.append(("C", "BAZBAZ"))
assert header[("C", np.int16(0))] == "BAZ"
assert header[("C", np.uint32(1))] == "BAZBAZ"
def test_header_data_size(self):
"""
Tests data size calculation (with and without padding) given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header["BITPIX"] = 32
header["NAXIS"] = 2
header["NAXIS1"] = 100
header["NAXIS2"] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
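# data_size is |BITPIX| / 8 * NAXIS1 * NAXIS2 = 4 * 100 * 100 = 40000 bytes;
# data_size_padded rounds that up to the next 2880-byte FITS block:
# 14 * 2880 = 40320.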
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup_method(self):
super().setup_method()
self._test_header = fits.Header()
self._test_header.set("DP1", "NAXIS: 2")
self._test_header.set("DP1", "AXIS.1: 1")
self._test_header.set("DP1", "AXIS.2: 2")
self._test_header.set("DP1", "NAUX: 2")
self._test_header.set("DP1", "AUX.1.COEFF.0: 0")
self._test_header.set("DP1", "AUX.1.POWER.0: 1")
self._test_header.set("DP1", "AUX.1.COEFF.1: 0.00048828125")
self._test_header.set("DP1", "AUX.1.POWER.1: 1")
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
assert c.comment == "A comment"
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.1
assert c.field_specifier == "NAXIS"
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1", "NAXIS: 2")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: 2.0")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: a")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1.NAXIS", 2)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card("DP1.NAXIS", "a")
assert c.keyword == "DP1.NAXIS"
assert c.value == "a"
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.comment == "A comment"
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
c.field_specifier = "NAXIS1"
assert c.field_specifier == "NAXIS1"
assert c.keyword == "DP1.NAXIS1"
assert c.value == 2.0
assert c.comment == "A comment"
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set("abc.def", 1)
header.set("abc.DEF", 2)
assert header["abc.def"] == 1
assert header["ABC.def"] == 1
assert header["aBc.def"] == 1
assert header["ABC.DEF"] == 2
assert "ABC.dEf" not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header["DP1"] == "NAXIS: 2"
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header["DP1.NAXIS"] == 2.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
assert self._test_header["DP1.AUX.1.COEFF.1"] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header["DP1.AXIS.3"]
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header["DP1.NAXIS"] == 3.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
self._test_header["DP1.AXIS.1"] = 1.1
assert self._test_header["DP1.AXIS.1"] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h["D2IM1.EXTVER"] = 1
assert h["D2IM1.EXTVER"] == 1.0
h["D2IM1.EXTVER"] = 2
assert h["D2IM1.EXTVER"] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2"
c = fits.Card("DP1.NAXIS", 2)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
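# rawkeyword/rawvalue reflect the card as actually stored ('DP1' plus the
# full field-specifier string), while keyword/value give the parsed RVKC
# form ('DP1.NAXIS' and the float 2.0).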
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set("DP1", "AXIS.3: 1", "a comment", after="DP1.AXIS.2")
assert self._test_header[3] == 1
assert self._test_header["DP1.AXIS.3"] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header["DP1.AXIS.1"]
assert len(self._test_header) == 7
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.AXIS.2"
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header["DP1.AXIS.2"]
assert len(self._test_header) == 6
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header["DP1.AXIS.*"]
assert isinstance(cl, fits.Header)
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
cl = self._test_header["DP1.N*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'",
]
cl = self._test_header["DP1.AUX..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl = self._test_header["DP?.NAXIS"]
assert [str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'"]
cl = self._test_header["DP1.A*S.*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header["DP1.A*..."]
assert len(self._test_header) == 2
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header["DP1.A*..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl2 = cl["*.*AUX..."]
assert [str(c).strip() for c in cl2.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl) == ["DP1.AXIS.1", "DP1.AXIS.2"]
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header["DP1.AXIS.*"]
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h["HISTORY"] = "AXIS.1: 2"
h["HISTORY"] = "AXIS.2: 2"
assert "HISTORY.AXIS" not in h
assert "HISTORY.AXIS.1" not in h
assert "HISTORY.AXIS.2" not in h
assert h["HISTORY"] == ["AXIS.1: 2", "AXIS.2: 2"]
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h["HISTORY"] = "Date: 2012-09-19T13:58:53.756061"
assert "HISTORY.Date" not in h
assert str(h.cards[0]) == _pad("HISTORY Date: 2012-09-19T13:58:53.756061")
c = fits.Card.fromstring(" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ""
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h["FOO"] = "Date: 2012-09-19T13:58:53.756061"
assert "FOO.Date" not in h
assert str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'")
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h["FOO"] == "AXIS.1: 2"
assert h[("FOO", 1)] == "AXIS.2: 4"
assert h["FOO.AXIS.1"] == 2.0
assert h["FOO.AXIS.2"] == 4.0
assert "FOO.AXIS" not in h
assert "FOO.AXIS." not in h
assert "FOO." not in h
pytest.raises(KeyError, lambda: h["FOO.AXIS"])
pytest.raises(KeyError, lambda: h["FOO.AXIS."])
pytest.raises(KeyError, lambda: h["FOO."])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
# Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data("zerowidth.fits"))
output = hf.parse(extensions=["AIPS FQ"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split("\n")) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1], keywords=["EXTNAME", "BITPIX"])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split("\n")) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=["NAXIS*"])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
# Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data("test0.fits"))
assert "EXTNAME = 'SCI" in hf.parse(extensions=["SCI,2"])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data("comp.fits"))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1], compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1], compressed=True)
hf.close()
def test_fitsheader_compressed_from_primary_image_ext(self):
"""Regression test for issue https://github.com/astropy/astropy/issues/7312"""
data = np.arange(2 * 2, dtype=np.int8).reshape((2, 2))
phdu = fits.PrimaryHDU(data=data)
chdu = fits.CompImageHDU(data=phdu.data, header=phdu.header)
chdu.writeto(self.temp("tmp2.fits"), overwrite=True)
with fits.open(self.temp("tmp2.fits")) as hdul:
assert "XTENSION" not in hdul[1].header
assert "PCOUNT" not in hdul[1].header
assert "GCOUNT" not in hdul[1].header
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data("zerowidth.fits")
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=["AIPS FQ", 2, "4"])
assert len(mytable) == (
len(fitsobj["AIPS FQ"].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header)
)
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=["AIPS FQ"])
assert np.all(mytable["filename"] == test_filename)
assert np.all(mytable["hdu"] == "AIPS FQ")
assert mytable["value"][mytable["keyword"] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert len(mytable) == 1
assert mytable["hdu"][0] == "AIPS FQ"
assert mytable["keyword"][0] == "EXTNAME"
assert mytable["value"][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=["DOES_NOT_EXIST"])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["DOES_NOT_EXIST"])
assert mytable is None
formatter.close()
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_hdu_writeto_mode(self, mode):
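        # writeto should accept an already-open file object, regardless of
        # whether it was opened in a write or append mode.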
with open(self.temp("mode.fits"), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ("no comment",)
return super().append(card, *args, **kwargs)
my_header = MyHeader(
(
("a", 1.0, "first"),
("b", 2.0, "second"),
(
"c",
3.0,
),
)
)
assert my_header.comments["a"] == "first"
assert my_header.comments["b"] == "second"
assert my_header.comments["c"] == "no comment"
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments["b"] == "second"
assert slice_.comments["c"] == "no comment"
selection = my_header["c*"]
assert type(selection) is MyHeader
assert selection.comments["c"] == "no comment"
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments["b"] == "second"
assert copy_.comments["c"] == "no comment"
my_header.extend((("d", 4.0),))
assert my_header.comments["d"] == "no comment"
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
import numpy as np
import pytest
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.io.fits.hdu.compressed import DITHER_SEED_CHECKSUM, SUBTRACTIVE_DITHER_1
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from .conftest import FitsTestCase
from .test_table import comparerecords
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = fits.ImageHDU(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = fits.ImageHDU(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header["EXTVER"] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert "EXTVER" not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
        # Passing ver to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr["EXTVER"] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr["FILENAME"] = "labq01i3q_rawtag.fits"
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert phdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data("test0.fits")) # readonly
        # data parts are lazily instantiated, so if we close the HDUList
        # without touching the data, the data cannot be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def test_open_2(self):
r = fits.open(self.data("test0.fits"))
info = [(0, "PRIMARY", 1, "PrimaryHDU", 138, (), "", "")] + [
(x, "SCI", x, "ImageHDU", 61, (40, 40), "int16", "") for x in range(1, 5)
]
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data("test0.fits"))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
# Test that HDUs can be accessed with lazy_load_hdus=False
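        # (When lazy loading is disabled, all HDUs are read into memory at
        # open time, so they remain accessible even after the file is closed.)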
r = fits.open(self.data("test0.fits"), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == "list index out of range"
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data("test0.fits"))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp("a_str.fits")
bfits = self.temp("b_str.fits")
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp("a_fileobj.fits")
bbfits = self.temp("b_fileobj.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp("a_str_slice.fits")
bfits = self.temp("b_str_slice.fits")
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp("a_fileobj_slice.fits")
bbfits = self.temp("b_fileobj_slice.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([("EXTNAME", "XPRIMARY"), ("EXTVER", 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert "EXTNAME" in hdul[0].header
assert hdul[0].name == "XPRIMARY"
assert hdul[0].name == hdul[0].header["EXTNAME"]
info = [(0, "XPRIMARY", 1, "PrimaryHDU", 5, (), "", "")]
assert hdul.info(output=False) == info
assert hdul["PRIMARY"] is hdul["XPRIMARY"]
assert hdul["PRIMARY"] is hdul[("XPRIMARY", 1)]
hdul[0].name = "XPRIMARY2"
assert hdul[0].header["EXTNAME"] == "XPRIMARY2"
hdul.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].name == "XPRIMARY2"
def test_io_manipulation(self):
        # Get a keyword value. An extension can be referred to by name or by
        # number. Both extension and keyword names are case insensitive.
with fits.open(self.data("test0.fits")) as r:
assert r["primary"].header["naxis"] == 0
assert r[0].header["naxis"] == 0
            # If there is more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r["sci", 1].header["detector"] == 1
# append (using "update()") a new card
r[0].header["xxx"] = 1.234e56
assert (
"\n".join(str(x) for x in r[0].header.cards[-3:])
== "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 "
)
# rename a keyword
r[0].header.rename_keyword("filename", "fname")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "history")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "simple")
r[0].header.rename_keyword("fname", "filename")
# get a subsection of data
assert np.array_equal(
r[2].data[:3, :3],
np.array(
[[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16
),
)
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp("test_new.fits"), mode="append") as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
                # When the file is closed, the most recent additions of
                # extension(s) since the last flush() will be appended, but any
                # HDU that already existed at the last flush will not be
                # modified
del n
            # If an existing file is opened with "append" mode, then, as in
            # readonly mode, the HDUs will be read into the HDUList, which can
            # be modified in memory but cannot be written back to the original
            # file. A file opened with append mode can only add new HDUs.
os.rename(self.temp("test_new.fits"), self.temp("test_append.fits"))
with fits.open(self.temp("test_append.fits"), mode="append") as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp("test_append.fits"), self.temp("test_update.fits"))
with fits.open(self.temp("test_update.fits"), mode="update") as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header["rootname"] == "U2EQ0201T"
u[0].header["rootname"] = "abc"
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
                # If the changes affect the size structure, e.g. adding or
                # deleting HDU(s), expanding or reducing the header beyond the
                # existing number of blocks (2880 bytes per block), or changing
                # the data size, then the HDUList is written to a temporary
                # file, the original file is deleted, and the temporary file is
                # renamed to the original file name and reopened in update
                # mode. To a user, these two kinds of updating writeback seem
                # to be the same, unless the optional argument in flush or
                # close is set to 1.
del u[2]
u.flush()
                # The writeto method of HDUList writes the current HDUList,
                # with all changes made up to now, to a new file. This method
                # works the same regardless of the mode the HDUList was opened
                # with.
u.append(r[3])
u.writeto(self.temp("test_new.fits"))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data("test0.fits")) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name="SCI")
assert np.array_equal(
hdu.data,
np.array(
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
],
dtype=np.float32,
),
)
# create an HDU with header and data
# notice that the header has the right NAXIS's since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype="int32"))
assert (
"\n".join(str(x) for x in hdu2.header.cards[1:5])
== "BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters "
)
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data("test0.fits"), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
# make a defect HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with pytest.warns(
AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\."
) as w:
hdu.verify()
assert len(w) == 3
with pytest.warns(
AstropyUserWarning,
match=r"HDUList's 0th element is not a primary HDU\. "
r"Fixed by inserting one as 0th HDU\.",
) as w:
hdu.writeto(self.temp("test_new2.fits"), "fix")
assert len(w) == 3
def test_section(self):
# section testing
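        # The .section interface reads only the requested subset of the data
        # directly from the file, so large images can be accessed piecewise
        # without loading the full array into memory.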
fs = fits.open(self.data("arange.fits"))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]),
)
assert np.array_equal(
fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362])
)
assert np.array_equal(
fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359])
)
assert np.array_equal(
fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359])
)
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array(
[
[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384],
]
),
)
assert np.array_equal(
fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]]),
)
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(
fs[0].section[3:6, :, :][:3, :3, :3],
np.array(
[
[[330, 331, 332], [341, 342, 343], [352, 353, 354]],
[[440, 441, 442], [451, 452, 453], [462, 463, 464]],
[[550, 551, 552], [561, 562, 563], [572, 573, 574]],
]
),
)
assert np.array_equal(
fs[0].section[:, :, :][:3, :2, :2],
np.array(
[[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]]
),
)
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3])
bool_index = np.array(
[True, False, True, True, False, False, True, True, False, True]
)
assert np.array_equal(fs[0].section[:, bool_index, :], dat[:, bool_index, :])
assert np.array_equal(fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3])
# Can we use negative indices?
assert np.array_equal(fs[0].section[-1], dat[-1])
assert np.array_equal(fs[0].section[-9:-7], dat[-9:-7])
assert np.array_equal(fs[0].section[-4, -6:-3, -1], dat[-4, -6:-3, -1])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
slice(2, 3),
]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
]:
for idx3 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
slice(2, 3),
]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data("scale.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data("scale.fits"))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
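        # With do_not_scale_image_data=True the raw on-disk integer values are
        # returned (BSCALE/BZERO are not applied), so the dtype remains the
        # big-endian signed integer type stored in the file.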
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype(">i2")
with fits.open(self.data("scale.fits")) as hdul:
assert hdul[0].data.dtype == np.dtype("float32")
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp("test_new.fits"), data=np.array([], dtype="uint8"))
d = np.zeros([100, 100]).astype("uint16")
fits.append(self.temp("test_new.fits"), data=d)
with fits.open(self.temp("test_new.fits"), uint=True) as f:
assert f[1].data.dtype == "uint16"
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type="uint8", bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2**int_size) - 1
if int_size == 64:
                max_uint = np.uint64(max_uint)
dtype = f"uint{int_size}"
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f"uint{int_size}"
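            # FITS has no native unsigned integer types; unsigned data is
            # stored as signed integers with an offset of BZERO = 2**(bits-1),
            # which is what the assertions below check.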
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
filename = f"uint{int_size}.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in new_uint_hdu.header
assert new_uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
@pytest.mark.parametrize(("from_file"), (False, True))
@pytest.mark.parametrize(("do_not_scale"), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(
self, from_file, do_not_scale
):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype="uint16")
if from_file:
            # To generate the proper input file we always want to scale the
            # data before writing it...otherwise when we open it, it will be
            # regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = "unsigned_int.fits"
tmp_uint.writeto(self.temp(filename))
with fits.open(
self.temp(filename), do_not_scale_image_data=do_not_scale
) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr, do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert "BSCALE" in uint_hdu.header
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BSCALE"] == 1
assert uint_hdu.header["BZERO"] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header["BITPIX"] < 0
# BSCALE and BZERO should NOT be in header any more.
assert "BSCALE" not in uint_hdu.header
assert "BZERO" not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = "test_uint_to_float.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
NaNs in the data array.
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header["BLANK"] = 999
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header["BLANK"] = 2
with pytest.warns(
AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
) as w:
hdu.writeto(self.temp("test_new.fits"))
            # Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not an
            # integer type
assert len(w) == 1
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with pytest.warns(
AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
) as w:
with fits.open(self.temp("test_new.fits")) as h:
assert np.all(arr == h[0].data)
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale("int16", bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp("test.fits")
hdu.data[0] = 9999
hdu.header["BLANK"] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(
fits.verify.VerifyWarning, match=r"Invalid 'BLANK' keyword in header"
):
hdul.writeto(self.temp("test2.fits"))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with fits.open(self.temp("test2.fits")) as hdul2:
assert "BLANK" not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True, mode="update") as hdul3:
data = hdul3[0].data
# This emits warning that pytest cannot catch properly, so we
# catch it with pytest.mark.filterwarnings above.
assert np.isnan(data[0])
with fits.open(filename, do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header["BLANK"] == 9999
assert hdul4[0].header["BSCALE"] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header["BZERO"] = 1.0
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data("fixed-1890.fits"))
orig_data = hdul[0].data
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data("fixed-1890.fits"))
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data("fixed-1890.fits"), do_not_scale_image_data=True)
hdul.writeto(
self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp("test_new.fits"), mode="update")
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
        Replacing the original header of an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file("test0.fits")
with fits.open(self.temp("test0.fits"), mode="update") as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy["NAXIS*"]
hdul[1].header = hdr_copy
with fits.open(self.temp("test0.fits")) as hdul:
assert (orig_data == hdul[1].data).all()
# The test below raised a `ResourceWarning: unclosed transport` exception
# due to a bug in Python <=3.10 (cf. cpython#90476)
@pytest.mark.filterwarnings("ignore:unclosed transport <asyncio.sslproto")
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file("scale.fits")
mtime = os.stat(self.temp("scale.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("scale.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("scale.fits")).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp("scale.fits"), "update")
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp("scale.fits")).st_mtime
hdul = fits.open(self.temp("scale.fits"), mode="update")
assert hdul[0].data.dtype == np.dtype(">f4")
assert hdul[0].header["BITPIX"] == -32
assert "BZERO" not in hdul[0].header
assert "BSCALE" not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp("scale.fits"))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype(">f4")
assert hdul[0].header["BITPIX"] == -32
assert "BZERO" not in hdul[0].header
assert "BSCALE" not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file("scale.fits")
with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul:
orig_bitpix = hdul[0].header["BITPIX"]
orig_bzero = hdul[0].header["BZERO"]
orig_bscale = hdul[0].header["BSCALE"]
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].header["BITPIX"] == orig_bitpix
assert hdul[0].header["BZERO"] == orig_bzero
assert hdul[0].header["BSCALE"] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp("scale.fits")) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("test0.fits")) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].data is None
assert h[1].header["NAXIS"] == 0
assert "NAXIS1" not in h[1].header
assert "NAXIS2" not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header["BLANK"] = "nan"
with pytest.warns(
fits.verify.VerifyWarning,
match=r"Invalid value for 'BLANK' keyword in header: 'nan'",
):
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
file_data = f.read()
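        # HDUList.fromstring parses a complete FITS file held in a bytes
        # buffer, so the scaled data should round-trip just as it does when
        # read from disk.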
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as (
hdu,
):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r"data object array\(1\) should have at least one dimension"
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update") as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp("test.fits")) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
("data", "compression_type", "quantize_level"),
[
(np.zeros((2, 10, 10), dtype=np.float32), "RICE_1", 16),
(np.zeros((2, 10, 10), dtype=np.float32), "GZIP_1", -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), "GZIP_2", -0.01),
(np.zeros((100, 100)) + 1, "HCOMPRESS_1", 16),
(np.zeros((10, 10)), "PLIO_1", 16),
],
)
@pytest.mark.parametrize("byte_order", ["<", ">"])
def test_comp_image(self, data, compression_type, quantize_level, byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(
data,
name="SCI",
compression_type=compression_type,
quantize_level=quantize_level,
)
ofd.append(chdu)
ofd.writeto(self.temp("test_new.fits"), overwrite=True)
ofd.close()
with fits.open(self.temp("test_new.fits")) as fd:
assert (fd[1].data == data).all()
assert fd[1].header["NAXIS"] == chdu.header["NAXIS"]
assert fd[1].header["NAXIS1"] == chdu.header["NAXIS1"]
assert fd[1].header["NAXIS2"] == chdu.header["NAXIS2"]
assert fd[1].header["BITPIX"] == chdu.header["BITPIX"]
@pytest.mark.remote_data
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import pickle
np.random.seed(42)
# Basically what scipy.datasets.ascent() does.
fname = download_file(
"https://github.com/scipy/dataset-ascent/blob/main/ascent.dat?raw=true"
)
with open(fname, "rb") as f:
scipy_data = np.array(pickle.load(f))
data = scipy_data + np.random.randn(512, 512) * 10
fits.ImageHDU(data).writeto(self.temp("im1.fits"))
fits.CompImageHDU(
data,
compression_type="RICE_1",
quantize_method=1,
quantize_level=-1,
dither_seed=5,
).writeto(self.temp("im2.fits"))
fits.CompImageHDU(
data,
compression_type="RICE_1",
quantize_method=1,
quantize_level=-100,
dither_seed=5,
).writeto(self.temp("im3.fits"))
im1 = fits.getdata(self.temp("im1.fits"))
im2 = fits.getdata(self.temp("im2.fits"))
im3 = fits.getdata(self.temp("im3.fits"))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(
ValueError,
fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32),
name="SCI",
compression_type="HCOMPRESS_1",
quantize_level=16,
tile_size=[2, 10, 10],
)
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(
data=cube,
name="SCI",
compression_type="HCOMPRESS_1",
quantize_level=16,
tile_size=[5, 5, 1],
)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul["SCI"].data - cube).max() < 1.0 / 15.0
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
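        # With dither_seed=DITHER_SEED_CHECKSUM the dither seed is derived
        # from a checksum of the first tile of the data, so the value computed
        # below should match the ZDITHER0 keyword written to the header.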
csum = (array[0].view("uint8").sum() % 10000) + 1
hdu = fits.CompImageHDU(
data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM,
)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert "ZQUANTIZ" in hdul[1]._header
assert hdul[1]._header["ZQUANTIZ"] == "SUBTRACTIVE_DITHER_1"
assert "ZDITHER0" in hdul[1]._header
assert hdul[1]._header["ZDITHER0"] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with fits.open(self.data("comp.fits"), disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data("comp.fits")) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file("comp.fits")
mtime = os.stat(self.temp("comp.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("comp.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("comp.fits")).st_mtime
@pytest.mark.slow
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("scale.fits"))
mtime = os.stat(self.temp("scale.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("scale.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("scale.fits")).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp("scale.fits"), "update")
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp("scale.fits")).st_mtime
hdul = fits.open(self.temp("scale.fits"), mode="update")
assert hdul[1].data.dtype == np.dtype("float32")
assert hdul[1].header["BITPIX"] == -32
assert "BZERO" not in hdul[1].header
assert "BSCALE" not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp("scale.fits"))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype("float32")
assert hdul[1].header["BITPIX"] == -32
assert "BZERO" not in hdul[1].header
assert "BSCALE" not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data("comp.fits")) as hdul:
hdul[1].writeto(self.temp("test.fits"))
with fits.open(self.data("comp.fits")) as hdul1:
with fits.open(self.temp("test.fits")) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(
hdul1[1].compressed_data, hdul2[1].compressed_data
)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(
self.data("fixed-1890.fits"), do_not_scale_image_data=True
) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("fixed-1890-z.fits"))
hdul = fits.open(self.temp("fixed-1890-z.fits"))
orig_data = hdul[1].data
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp("fixed-1890-z.fits"))
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp("fixed-1890-z.fits"), do_not_scale_image_data=True)
hdul.writeto(
self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp("test_new.fits"), mode="update")
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("scale.fits"))
with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul:
orig_bitpix = hdul[1].header["BITPIX"]
orig_bzero = hdul[1].header["BZERO"]
orig_bscale = hdul[1].header["BSCALE"]
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[1].header["BITPIX"] == orig_bitpix
assert hdul[1].header["BZERO"] == orig_bzero
assert hdul[1].header["BSCALE"] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp("scale.fits")) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data("scale.fits")) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type="GZIP_1")
# First make a test image with lossy compression and make sure it
        # wasn't compressed perfectly. This shouldn't ever happen, but just
        # to make sure the test is non-trivial.
chdu1.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(
data=noise, compression_type="GZIP_1", quantize_level=0.0
) # No quantization
chdu2.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = (np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] + np.arange(
1, 7
)
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[: data2.shape[0], : data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type="RICE_1", tile_size=(6, 7))
chdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), disable_image_compression=True) as h:
assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM1"])
assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM2"])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file("comp.fits")
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header["test1"] = "test"
hdul[1]._header["test2"] = "test2"
with fits.open(self.temp("comp.fits")) as hdul:
assert "test1" in hdul[1].header
assert hdul[1].header["test1"] == "test"
assert "test2" in hdul[1].header
assert hdul[1].header["test2"] == "test2"
# Test update via index now:
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
hdr[hdr.index("TEST1")] = "foo"
with fits.open(self.temp("comp.fits")) as hdul:
assert hdul[1].header["TEST1"] == "foo"
# Test slice updates
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdul[1].header["TEST*"] = "qux"
with fits.open(self.temp("comp.fits")) as hdul:
assert list(hdul[1].header["TEST*"].values()) == ["qux", "qux"]
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
idx = hdr.index("TEST1")
hdr[idx : idx + 2] = "bar"
with fits.open(self.temp("comp.fits")) as hdul:
assert list(hdul[1].header["TEST*"].values()) == ["bar", "bar"]
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdul[1].header[("COMMENT", 1)] = "I am fire. I am death!"
with fits.open(self.temp("comp.fits")) as hdul:
assert hdul[1].header["COMMENT"][1] == "I am fire. I am death!"
assert hdul[1]._header["COMMENT"][1] == "I am fire. I am death!"
# Test deleting by keyword and by slice
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
del hdr["COMMENT"]
idx = hdr.index("TEST1")
del hdr[idx : idx + 2]
with fits.open(self.temp("comp.fits")) as hdul:
assert "COMMENT" not in hdul[1].header
assert "COMMENT" not in hdul[1]._header
assert "TEST1" not in hdul[1].header
assert "TEST1" not in hdul[1]._header
assert "TEST2" not in hdul[1].header
assert "TEST2" not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with pytest.warns(UserWarning) as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data("comp.fits")) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, "TFIELDS", 8)
test_set_keyword(hdr, "TTYPE1", "Foo")
test_set_keyword(hdr, "ZCMPTYPE", "ASDF")
test_set_keyword(hdr, "ZVAL1", "Foo")
def test_compression_header_append(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.append("TFIELDS")
assert len(w) == 1
assert "TFIELDS" not in imghdr
imghdr.append(("FOO", "bar", "qux"), end=True)
assert "FOO" in imghdr
assert imghdr[-1] == "bar"
assert "FOO" in tblhdr
assert tblhdr[-1] == "bar"
imghdr.append(("CHECKSUM", "abcd1234"))
assert "CHECKSUM" in imghdr
assert imghdr["CHECKSUM"] == "abcd1234"
assert "CHECKSUM" not in tblhdr
assert "ZHECKSUM" in tblhdr
assert tblhdr["ZHECKSUM"] == "abcd1234"
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data("comp.fits")) as hdul:
header = hdul[1].header
while len(header) < 1000:
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.insert(1000, "TFIELDS")
assert len(w) == 1
assert "TFIELDS" not in imghdr
assert tblhdr.count("TFIELDS") == 1
# First try keyword-relative insert
imghdr.insert("TELESCOP", ("OBSERVER", "Phil Plait"))
assert "OBSERVER" in imghdr
assert imghdr.index("OBSERVER") == imghdr.index("TELESCOP") - 1
assert "OBSERVER" in tblhdr
assert tblhdr.index("OBSERVER") == tblhdr.index("TELESCOP") - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index("OBSERVER")
imghdr.insert("OBSERVER", ("FOO",))
assert "FOO" in imghdr
assert imghdr.index("FOO") == idx
assert "FOO" in tblhdr
assert tblhdr.index("FOO") == tblhdr.index("OBSERVER") - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w:
imghdr.set("ZBITPIX", 77, "asdf", after="XTENSION")
assert len(w) == 1
assert "ZBITPIX" not in imghdr
assert tblhdr.count("ZBITPIX") == 1
assert tblhdr["ZBITPIX"] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set("GCOUNT", 99, before="PCOUNT")
assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") - 1
assert imghdr["GCOUNT"] == 99
assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") - 1
assert tblhdr["ZGCOUNT"] == 99
assert tblhdr.index("PCOUNT") == 5
assert tblhdr.index("GCOUNT") == 6
assert tblhdr["GCOUNT"] == 1
imghdr.set("GCOUNT", 2, after="PCOUNT")
assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") + 1
assert imghdr["GCOUNT"] == 2
assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") + 1
assert tblhdr["ZGCOUNT"] == 2
assert tblhdr.index("PCOUNT") == 5
assert tblhdr.index("GCOUNT") == 6
assert tblhdr["GCOUNT"] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header["COMMENT"] = "hello world"
assert hdu.header["COMMENT"] == ["hello world"]
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["COMMENT"] == ["hello world"]
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype="float32")
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(("ZTENSION", "IMAGE"))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count("ZTENSION") == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop("ZNAXIS")
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop("ZBITPIX")
with pytest.raises(KeyError):
comp_hdu.compressed_data
def test_compressed_header_double_extname(self):
"""Test that a double EXTNAME with one default value does not
mask the non-default value."""
with fits.open(self.data("double_ext.fits")) as hdul:
hdu = hdul[1]
# Raw header has 2 EXTNAME entries
indices = hdu._header._keyword_indices["EXTNAME"]
assert len(indices) == 2
# The non-default name should be returned.
assert hdu.name == "ccd00"
assert "EXTNAME" in hdu.header
assert hdu.name == hdu.header["EXTNAME"]
# There should be 1 non-default EXTNAME entry.
indices = hdu.header._keyword_indices["EXTNAME"]
assert len(indices) == 1
# Test header sync from property set.
new_name = "NEW_NAME"
hdu.name = new_name
assert hdu.name == new_name
assert hdu.header["EXTNAME"] == new_name
assert hdu._header["EXTNAME"] == new_name
assert hdu._image_header["EXTNAME"] == new_name
# Check that setting the header will change the name property.
hdu.header["EXTNAME"] = "NEW2"
assert hdu.name == "NEW2"
hdul.writeto(self.temp("tmp.fits"), overwrite=True)
with fits.open(self.temp("tmp.fits")) as hdul1:
hdu1 = hdul1[1]
assert len(hdu1._header._keyword_indices["EXTNAME"]) == 1
assert hdu1.name == "NEW2"
# Check that deleting EXTNAME and then setting the name will
# work properly.
del hdu.header["EXTNAME"]
hdu.name = "RE-ADDED"
assert hdu.name == "RE-ADDED"
with pytest.raises(TypeError):
hdu.name = 42
def test_compressed_header_extname(self):
"""Test consistent EXTNAME / hdu name interaction."""
name = "FOO"
hdu = fits.CompImageHDU(data=np.arange(10), name=name)
assert hdu._header["EXTNAME"] == name
assert hdu.header["EXTNAME"] == name
assert hdu.name == name
name = "BAR"
hdu.name = name
assert hdu._header["EXTNAME"] == name
assert hdu.header["EXTNAME"] == name
assert hdu.name == name
assert len(hdu._header._keyword_indices["EXTNAME"]) == 1
def test_compressed_header_minimal(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11694
Tests that CompImageHDU can be initialized with a Header that
contains few or no cards, and doesn't require specific cards
such as 'BITPIX' or 'NAXIS'.
"""
fits.CompImageHDU(data=np.arange(10), header=fits.Header())
header = fits.Header({"HELLO": "world"})
hdu = fits.CompImageHDU(data=np.arange(10), header=header)
assert hdu.header["HELLO"] == "world"
@pytest.mark.parametrize(
("keyword", "dtype", "expected"),
[
("BSCALE", np.uint8, np.float32),
("BSCALE", np.int16, np.float32),
("BSCALE", np.int32, np.float64),
("BZERO", np.uint8, np.float32),
("BZERO", np.int16, np.float32),
("BZERO", np.int32, np.float64),
],
)
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE or BZERO is set to a floating-point value, the uncompressed
image should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp("test.fits"))
del hdu
with fits.open(self.temp("test.fits")) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize(
"dtype", (np.uint8, np.int16, np.uint16, np.int32, np.uint32)
)
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid - 50, mid + 50, dtype=dtype)
testfile = self.temp("test.fits")
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
def test_write_non_contiguous_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2150
"""
orig = np.arange(100, dtype=float).reshape((10, 10), order="f")
assert not orig.flags.contiguous
primary = fits.PrimaryHDU()
hdu = fits.CompImageHDU(orig)
hdulist = fits.HDUList([primary, hdu])
hdulist.writeto(self.temp("test.fits"))
actual = fits.getdata(self.temp("test.fits"))
assert_equal(orig, actual)
def test_slice_and_write_comp_hdu(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9955
"""
with fits.open(self.data("comp.fits")) as hdul:
hdul[1].data = hdul[1].data[:200, :100]
assert not hdul[1].data.flags.contiguous
hdul[1].writeto(self.temp("test.fits"))
with fits.open(self.data("comp.fits")) as hdul1:
with fits.open(self.temp("test.fits")) as hdul2:
assert_equal(hdul1[1].data[:200, :100], hdul2[1].data)
def test_comphdu_bscale(tmp_path):
"""
Regression test for a bug that caused extensions using BZERO and BSCALE,
once converted to CompImageHDU, to end up with BZERO/BSCALE before
TFIELDS.
"""
filename1 = tmp_path / "3hdus.fits"
filename2 = tmp_path / "3hdus_comp.fits"
x = np.random.random((100, 100)) * 100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x - 50, dtype=int), uint=True)
x1.header["BZERO"] = 20331
x1.header["BSCALE"] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(
data=hdus[1].data.astype(np.uint32), header=hdus[1].header
)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify("exception")
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = get_pkg_data_filename("data/compressed_float_bzero.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmp_path):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmp_path / "floatimg_with_bzero.fits"
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header["BZERO"] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmp_path):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmp_path / "test.fits"
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmp_path / "test2.fits"
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
def test_int8(tmp_path):
"""Test for int8 support, https://github.com/astropy/astropy/issues/11995"""
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header["BITPIX"] == 8
assert hdul[0].header["BZERO"] == -128
assert hdul[0].header["BSCALE"] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype
|
14d9eb617260fd735ea4e6f41d608f93e840e2a9bd1b3f0f82f90144298a4982 | import itertools
import numpy as np
import pytest
COMPRESSION_TYPES = [
"GZIP_1",
"GZIP_2",
"RICE_1",
"HCOMPRESS_1",
"PLIO_1",
]
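# Note: these are the ZCMPTYPE values defined by the FITS tiled image
# compression convention.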
def fitsio_param_to_astropy_param(param):
# Convert fitsio kwargs to astropy kwargs
_map = {"qlevel": "quantize_level", "qmethod": "quantize_method"}
param = {_map[k]: v for k, v in param.items()}
# Map quantize_level
if param.get("quantize_level", "missing") is None:
param["quantize_level"] = 0.0
return param
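# Illustrative mapping (inputs assumed for the example): {"qlevel": None}
# becomes {"quantize_level": 0.0} (used below for the lossless GZIP cases),
# and {"qlevel": 10, "qmethod": 1} becomes
# {"quantize_level": 10, "quantize_method": 1}.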
def _expand(*params):
"""
Expands a list of N iterables of parameters into a flat list with all
combinations of all parameters.
"""
expanded = []
for ele in params:
expanded += list(itertools.product(*ele))
return expanded
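# For example, _expand([("a",), ("1", "2")]) returns [("a", "1"), ("a", "2")],
# so ALL_INTEGER_DTYPES below evaluates to
# ["<i2", "<i4", ">i2", ">i4", "<u1", ">u1"].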
ALL_INTEGER_DTYPES = [
"".join(ele)
for ele in _expand([("<", ">"), ("i",), ("2", "4")], [("<", ">"), ("u",), ("1",)])
]
ALL_FLOAT_DTYPES = ["".join(ele) for ele in _expand([("<", ">"), ("f",), ("4", "8")])]
@pytest.fixture(
scope="session",
ids=lambda x: " ".join(map(str, x)),
# The params here are the compression type, the parameters for the
# compression / quantisation, and the dtype
params=_expand(
# Test all compression types with default compression parameters for
# all integers
[
COMPRESSION_TYPES,
({},),
ALL_INTEGER_DTYPES,
],
# GZIP supports lossless non-quantized floating point data
[
("GZIP_1", "GZIP_2"),
({"qlevel": None},),
ALL_FLOAT_DTYPES,
],
# All compression types can also take quantized floating point input
# Rather than running all quantization parameters for all algorithms
# split up the algorithms to reduce the total number of tests.
[
["GZIP_1", "GZIP_2"],
({"qlevel": 5, "qmethod": -1},),
ALL_FLOAT_DTYPES,
],
[
["RICE_1"],
({"qlevel": 10, "qmethod": 1},),
ALL_FLOAT_DTYPES,
],
[
["HCOMPRESS_1"],
(
{"qlevel": 20, "qmethod": 2},
{"qlevel": 10, "qmethod": 1},
),
ALL_FLOAT_DTYPES,
],
# Note no PLIO here as that's intended for masks, i.e. data which can't
# be generated with quantization.
),
)
def comp_param_dtype(request):
return request.param
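# Each parametrization is a (compression_type, compression_param, dtype)
# tuple, e.g. ("RICE_1", {"qlevel": 10, "qmethod": 1}, "<f4"); the three
# fixtures below simply unpack it.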
@pytest.fixture(scope="session")
def compression_type(comp_param_dtype):
return comp_param_dtype[0]
@pytest.fixture(scope="session")
def compression_param(comp_param_dtype):
return comp_param_dtype[1]
@pytest.fixture(scope="session")
def dtype(comp_param_dtype):
return comp_param_dtype[2]
@pytest.fixture(scope="session")
def numpy_rng():
return np.random.default_rng(0)
|
b2b072791d8ddaf71bd1d94aae47360cf6a930e8438963c8b3c2b88d4dceca04 | from pathlib import Path
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy.io import fits
from astropy.io.fits._tiled_compression.codecs import PLIO1
from .conftest import fitsio_param_to_astropy_param
@pytest.fixture
def canonical_data_base_path():
return Path(__file__).parent / "data"
@pytest.fixture(
params=(Path(__file__).parent / "data").glob("m13_*.fits"), ids=lambda x: x.name
)
def canonical_int_hdus(request):
"""
This fixture provides 4 files downloaded from
https://fits.gsfc.nasa.gov/registry/tilecompression.html, which are used as
canonical tests of data not compressed by Astropy.
"""
with fits.open(request.param) as hdul:
yield hdul[1]
@pytest.fixture
def original_int_hdu(canonical_data_base_path):
with fits.open(canonical_data_base_path / "m13.fits") as hdul:
yield hdul[0]
def test_canonical_data(original_int_hdu, canonical_int_hdus):
assert_allclose(original_int_hdu.data, canonical_int_hdus.data)
def test_zblank_support(canonical_data_base_path, tmp_path):
# This uses a test 12x12 image which contains a NaN value in the [1, 1]
# pixel - it was compressed using fpack which automatically added a ZBLANK
# header keyword
reference = np.arange(144).reshape((12, 12)).astype(float)
reference[1, 1] = np.nan
with fits.open(canonical_data_base_path / "compressed_with_nan.fits") as hdul:
assert_equal(np.round(hdul[1].data), reference)
# Now generate a file ourselves and check that the output has the ZBLANK
# keyword set automatically
hdu = fits.CompImageHDU(data=reference, compression_type="RICE_1", tile_size=(6, 6))
hdu.writeto(tmp_path / "test_zblank.fits")
with fits.open(tmp_path / "test_zblank.fits") as hdul:
assert "ZBLANK" in hdul[1].header
assert_equal(np.round(hdul[1].data), reference)
@pytest.mark.parametrize(
("shape", "tile_dim"),
(
([10, 10], [5, 5]), # something for HCOMPRESS
([5, 5, 5], [5, 5, 5]),
# ([5, 5, 5], [5, 5, 1]), # something for HCOMPRESS
([10, 15, 20], [5, 5, 5]),
([10, 5, 12], [5, 5, 5]),
# TODO: There's a stupid bit of code in CompImageHDU which stops this working.
# ([2, 3, 4, 5], [1, 1, 2, 3]),
([2, 3, 4, 5], [5, 5, 1, 1]),
),
)
def test_roundtrip_high_D(
numpy_rng, compression_type, compression_param, tmp_path, dtype, shape, tile_dim
):
if compression_type == "HCOMPRESS_1" and (
# We don't have at least a 2D image
len(shape) < 2
or
# We don't have 2D tiles
np.count_nonzero(np.array(tile_dim) != 1) != 2
or
# TODO: The following restrictions can be lifted with some extra work.
# The tile is not the first two dimensions of the data
tile_dim[0] == 1
or tile_dim[1] == 1
or
# The tile dimensions are not an integer multiple of the array dims
np.count_nonzero(np.array(shape[:2]) % tile_dim[:2]) != 0
):
pytest.xfail("HCOMPRESS requires 2D tiles.")
random = numpy_rng.uniform(high=255, size=shape)
# Set first value to be exactly zero as zero values require special treatment
# for SUBTRACTIVE_DITHER_2
random.ravel()[0] = 0.0
original_data = random.astype(dtype)
dtype_sanitizer = {
">": "big",
"<": "little",
"=": "native",
}
filename = (
tmp_path / f"{compression_type}_{dtype[1:]}_{dtype_sanitizer[dtype[0]]}.fits"
)
param = fitsio_param_to_astropy_param(compression_param)
hdu = fits.CompImageHDU(
data=original_data,
compression_type=compression_type,
tile_size=tile_dim,
**param,
)
hdu.writeto(filename)
atol = 0
if compression_param.get("qmethod", None) is not None:
# This is a horrific hack. We are comparing quantized data to unquantized
# data here, so there can be pretty large differences. What this test
# is really checking for is arrays which are *completely* different,
# which would indicate the compression has not worked.
atol = 17
with fits.open(filename) as hdul:
a = hdul[1].data
np.testing.assert_allclose(original_data, hdul[1].data, atol=atol)
def test_plio_1_out_of_range():
pc = PLIO1(tilesize=10)
data = np.arange(-10, 0).astype(np.int32)
with pytest.raises(ValueError):
pc.encode(data)
|
e23523f5669049e670f1c544371b2a24dc2b9894b78a3232314184d2a5253b75 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import numpy as np
from asdf.tags.core.ndarray import NDArrayType
from asdf.tests import helpers
from packaging.version import Version
import astropy.units as u
from astropy import table
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
from astropy.time import Time, TimeDelta
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_array_columns(tmpdir):
a = np.array(
[
([[1, 2], [3, 4]], 2.0, "x"),
([[5, 6], [7, 8]], 5.0, "y"),
([[9, 10], [11, 12]], 8.2, "z"),
],
dtype=[("a", "<i4", (2, 2)), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
assert t.columns["a"].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_structured_array_columns(tmpdir):
a = np.array(
[((1, "a"), 2.0, "x"), ((4, "b"), 5.0, "y"), ((5, "c"), 8.2, "z")],
dtype=[("a", [("a0", "<i4"), ("a1", "|S1")]), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_table_row_order(tmpdir):
a = np.array(
[(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")],
dtype=[("a", "<i4"), ("b", "<f8"), ("c", "|S1")],
)
t = table.Table(a, copy=False)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"))
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version("2.8.0"):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
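# With the inline threshold at 64 elements the three short columns are
# stored inline in the YAML tree, which is why `check` expects zero
# internal binary blocks.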
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
else:
helpers.assert_roundtrip_tree(
{"table": t},
tmpdir,
asdf_check_func=check,
write_options={"auto_inline": 64},
)
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff):
pass
assert "Inconsistent data column lengths" in str(err.value)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, "x"), (4, 5.0, "y"), (5, 8.2, "z")]
t = table.Table(
rows=data_rows, names=("a", "b", "c"), dtype=("i4", "f8", "S1"), masked=True
)
t.columns["a"].description = "RA"
t.columns["a"].unit = "degree"
t.columns["a"].meta = {"foo": "bar"}
t.columns["a"].mask = [True, False, True]
t.columns["c"].description = "Some description of some sort"
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t["a"] = [1, 2, 3]
t["b"] = ["x", "y", "z"]
t["c"] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff["table"]["c"], u.Quantity)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = Time(["2001-01-02T12:34:56", "2001-02-03T00:01:02"])
def check(ff):
assert isinstance(ff["table"]["c"], Time)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff["table"]["c"], TimeDelta)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5")
def check(ff):
assert isinstance(ff["table"]["c"], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new["a"], old["a"])
NDArrayType.assert_equal(new["b"], old["b"])
assert skycoord_equal(new["c"], old["c"])
helpers.assert_roundtrip_tree(
{"table": t}, tmpdir, asdf_check_func=check, tree_match_func=tree_match
)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff["table"]["c"], EarthLocation)
helpers.assert_roundtrip_tree({"table": t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t["a"] = [1, 2]
t["b"] = ["x", "y"]
t["c"] = table.NdarrayMixin([5, 6])
helpers.assert_roundtrip_tree({"table": t}, tmpdir)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], table.Table)
run_schema_example_test("stsci.edu", "asdf", "core/table", "1.0.0", check)
|
91ad634589321411f9345a0b8b03d11fdbb24b1602b0f77bf3796258b08d69d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
import os
import numpy as np
from asdf.tests import helpers
from astropy.io import fits
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_complex_structure(tmpdir):
with fits.open(
os.path.join(os.path.dirname(__file__), "data", "complex.fits"), memmap=False
) as hdulist:
tree = {"fits": hdulist}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_fits_table(tmpdir):
a = np.array([(0, 1), (2, 3)], dtype=[("A", int), ("B", int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {"fits": h}
def check_yaml(content):
assert b"!<tag:astropy.org:astropy/table/table-1.0.0>" in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
@pytest.mark.filterwarnings(
"ignore:The property AsdfFile.blocks has been deprecated:asdf.exceptions.AsdfDeprecationWarning"
)
def test_backwards_compat():
"""
Make sure that we can continue to read FITS HDUs that use the schema from
the ASDF Standard.
This test uses the examples in the fits schema from the ASDF Standard,
since these make no reference to Astropy's own fits definition.
"""
def check(asdffile):
assert isinstance(asdffile["example"], fits.HDUList)
run_schema_example_test("stsci.edu", "asdf", "fits/fits", "1.0.0", check)
|
5767726f971a7575cdc90762e910fd11742c00d6ac82d7eff1776625709b2caa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
import astropy.units as u
from . import _stats
__all__ = [
"gaussian_fwhm_to_sigma",
"gaussian_sigma_to_fwhm",
"binom_conf_interval",
"binned_binom_proportion",
"poisson_conf_interval",
"median_absolute_deviation",
"mad_std",
"signal_to_noise_oir_ccd",
"bootstrap",
"kuiper",
"kuiper_two",
"kuiper_false_positive_probability",
"cdf_from_intervals",
"interval_overlap_length",
"histogram_intervals",
"fold_intervals",
]
__doctest_skip__ = ["binned_binom_proportion"]
__doctest_requires__ = {
"binom_conf_interval": ["scipy"],
"poisson_conf_interval": ["scipy"],
}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
\left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n)\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
cases, particularly when n is very large and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
latter is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond. A 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
"""
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
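# Both branches below compute midpoint +/- halflength: the "wilson" branch
# implements the CI_Wilson formula from the Notes section and the "wald"
# branch the CI_Wald formula, with kappa the normal quantile computed above.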
if interval == "wilson":
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval
def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.cfa.harvard.edu/sherpa/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ needs
to be available (Scipy works only for N < 100).
This code is very intense numerically, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> from numpy.random import randn
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns an masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and data_median.ndim == 0
and np.isnan(data_median)
):
data_median = data.__array_wrap__(data_median)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (
isinstance(data, u.Quantity)
and func is np.median
and result.ndim == 0
and np.isnan(result)
):
result = data.__array_wrap__(result)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
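Examples
--------
An illustrative (made-up) case: an 1800 s exposure of a source giving
1 e-/s in a 25-pixel aperture, with 0.3 e-/s/pix sky background,
0.01 e-/s/pix dark current, and 5 e- read noise:
>>> round(float(signal_to_noise_oir_ccd(1800., 1.0, 0.3, 0.01, 5., 25.)), 1)
14.1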
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
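# Illustrative sketch (hypothetical values, not part of the library API): the
# resampling acts on the first axis, so for a 2-D input whole rows are drawn
# with replacement and paired measurements stay together.
def _example_bootstrap_rows():
    import numpy as np

    rng = np.random.default_rng(42)  # assumed seed for reproducibility
    xy = rng.normal(size=(50, 2))    # 50 hypothetical paired (x, y) measurements
    # Robust scatter of the y column for each of 100 row resamples.
    return bootstrap(xy, bootnum=100, bootfunc=lambda rows: mad_std(rows[:, 1]))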
def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <http://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
# result once and reuse it. The same is true for the factorial of N.
# eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest to integrate from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
Treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_min) =
eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
# We convert these values to float because mpmath.mpf cannot
# convert from numpy.int64.
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest to integrate from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
Treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_min) =
eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" value, is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found to prevent the solvers from finding
# the roots unless the starting values are very close to the final values.
# Thus, this primitive, time-wasting, brute-force stepping here to get
# an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: either :mod:`scipy` or `mpmath
<http://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100.)
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError("Either scipy or mpmath are required.")
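# Illustrative sketch with made-up numbers (not part of the library API): for a
# small observed count and a known background, the helper returns the
# (S_min, S_max) source-count interval at the requested confidence level.
# The public entry point for this calculation is expected to be
# astropy.stats.poisson_conf_interval(..., interval='kraft-burrows-nousek').
def _example_kraft_burrows_nousek():
    # Hypothetical: 5 observed counts over a known background of 2.5 counts,
    # at 90% confidence; requires scipy or mpmath to be installed.
    return _kraft_burrows_nousek(N=5, B=2.5, CL=0.90)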
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.420, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (
ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)
).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N)
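# Illustrative sketch (hypothetical data, not part of the library API): testing
# uniformly distributed phases against the default uniform CDF should give a
# large false positive probability, i.e. no evidence against uniformity.
def _example_kuiper_uniform():
    import numpy as np

    rng = np.random.default_rng(0)        # assumed seed
    phases = rng.uniform(0.0, 1.0, 500)   # hypothetical phase measurements
    D, fpp = kuiper(phases)               # default cdf is the uniform CDF on (0, 1)
    return D, fpp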
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
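# Illustrative sketch (hypothetical data, not part of the library API):
# comparing two samples drawn from the same distribution should yield a large
# false positive probability, while a shifted sample should yield a small one.
def _example_kuiper_two():
    import numpy as np

    rng = np.random.default_rng(1)                    # assumed seed
    a, b = rng.normal(size=200), rng.normal(size=200)
    shifted = rng.normal(loc=1.0, size=200)           # hypothetical offset sample
    return kuiper_two(a, b), kuiper_two(a, shifted)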
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the sum, over all input intervals, of the weight of
each interval times the number of times it covers the subinterval
(breaks[i], breaks[i+1]).
"""
r = []
breaks = set()
tot = 0
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by ``breaks`` and ``totals``.
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
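# Illustrative sketch (hypothetical observing windows, not part of the library
# API): fold two observation blocks, expressed in units of the orbital period,
# onto (0, 1) and turn the resulting exposure profile into a CDF that could be
# passed to `kuiper` as the null distribution.
def _example_exposure_cdf():
    # Two hypothetical blocks: phases 0.2-0.7 with weight 1, and 0.9-1.3
    # (wrapping past phase 1) with weight 2.
    breaks, totals = fold_intervals([(0.2, 0.7, 1.0), (0.9, 1.3, 2.0)])
    cdf = cdf_from_intervals(breaks, totals)
    return cdf(0.5)  # cumulative exposure fraction at phase 0.5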
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h
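# Illustrative sketch (hypothetical values, not part of the library API):
# average the piecewise-constant exposure profile produced by fold_intervals
# over four equal-width phase bins.
def _example_histogram_intervals():
    breaks, totals = fold_intervals([(0.05, 0.3, 1.0), (0.55, 0.95, 2.0)])
    return histogram_intervals(4, breaks, totals)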
3dc70d55c9ef497b47aafdde501474944db6dbb3430fb52c483d51a15ef393a1
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import sys
import types
import warnings
import weakref
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy import log
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.units import Quantity, QuantityInfo
from astropy.utils import ShapedLikeNDArray, isiterable
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaAttribute, MetaData
from . import conf, groups
from .column import (
BaseColumn,
Column,
FalseArray,
MaskedColumn,
_auto_names,
_convert_sequence_data_to_array,
col_copy,
)
from .connect import TableRead, TableWrite
from .index import (
Index,
SlicedIndex,
TableILoc,
TableIndices,
TableLoc,
TableLocIndices,
_IndexModeContext,
get_index,
)
from .info import TableInfo
from .mixins.registry import get_mixin_handler
from .ndarray_mixin import NdarrayMixin # noqa: F401
from .pprint import TableFormatter
from .row import Row
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = [
"Table.read",
"Table.write",
"Table._read",
"Table.convert_bytestring_to_unicode",
"Table.convert_unicode_to_bytestring",
]
__doctest_requires__ = {"*pandas": ["pandas>=1.1"]}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = "O" if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, "shape") else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, "info", None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
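# Illustrative sketch (hypothetical rows, not part of the library API): the
# helper returns the union of keys when every row is a Mapping, and None
# otherwise, which is how list-of-dict input is detected during Table init.
def _example_get_names_from_list_of_dict():
    rows = [{"a": 1, "b": 2}, {"a": 3, "c": 4}]         # hypothetical rows
    names = _get_names_from_list_of_dict(rows)          # ['a', 'b', 'c'] in set order
    not_dicts = _get_names_from_list_of_dict([[1, 2]])  # None: rows are not Mappings
    return names, not_dicts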
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError(
f"Illegal key or index value for {type(self).__name__} object"
)
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError(
f"Cannot replace column '{item}'. Use Table.replace_column() instead."
)
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, "_instance_ref"):
out = f"<{self.__class__.__name__} name={self.name} value={self()}>"
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist."""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and "__attributes__" not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f"{name} not in {self.name}")
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list."""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder("=")
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For a masked table, masked mixin columns need to have their mask set on the output array.
if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"):
data[col.info.name].mask = col.mask
return data
def __init__(
self,
data=None,
masked=False,
names=None,
dtype=None,
meta=None,
copy=True,
rows=None,
copy_indices=True,
units=None,
descriptions=None,
**kwargs,
):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError("Cannot specify dtype when copy=False")
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError("Cannot supply both `data` and `rows` values")
if isinstance(rows, types.GeneratorType):
# Without this, the all(..) test below would use up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, "__astropy_table__"):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError(
f"__init__() got unexpected keyword argument {list(kwargs.keys())[0]!r}"
)
if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names:
data = None
if isinstance(data, self.Row):
data = data._table[data._index : data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (
names_from_list_of_dict or _get_names_from_list_of_dict(data)
)
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError("Can not initialize a Table with a scalar")
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError(
"dtype was specified but could not be "
"parsed for column names"
)
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f"Data type {type(data)} not allowed to init Table")
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute("unit", units)
self._set_column_attribute("description", descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(
f"sequence of {attr} values must match number of columns"
)
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(
f"invalid column name {name} for setting {attr} attribute"
)
# Special case: ignore unit if it is an empty or blank string
if attr == "unit" and isinstance(value, str):
if value.strip() == "":
value = None
if value not in (np.ma.masked, None):
col = self[name]
if attr == "unit" and isinstance(col, Quantity):
# Update the Quantity unit in-place
col <<= value
else:
setattr(col.info, attr, value)
def __getstate__(self):
columns = OrderedDict(
(key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items()
)
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table(
[
getattr(col, "mask", FalseArray(col.shape))
for col in self.itercols()
],
names=self.colnames,
copy=False,
)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property.
"""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [
col.filled(fill_value) if hasattr(col, "filled") else col
for col in self.itercols()
]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
"""
Return the indices associated with columns of the table
as a TableIndices object.
"""
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum(index is x for x in lst) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
"""
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
"""
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
"""
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
"""
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
"""
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, "_supports_indexing", False):
raise ValueError(
'Cannot create an index on column "{}", of type "{}"'.format(
col.info.name, type(col)
)
)
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
def remove_indices(self, colname):
"""
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
"""
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
"""
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
"""
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
if np.dtype(dtype) != object:
raise ValueError("Datatype coercion is not allowed")
out = np.array(None, dtype=object)
out[()] = self
return out
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, "dtype"), (names, "names")):
if not isiterable(inp_list):
raise ValueError(f"{inp_str} must be a list or None")
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
# every occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(
self, data, copy=True, default_name=None, dtype=None, name=None
):
"""
Convert any allowed sequence data ``col`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
name or data.info.name or def_name
If ``data`` has no ``info`` then ``name = name or def_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (
original_data.__class__.__module__
+ "."
+ original_data.__class__.__name__
)
raise TypeError(
"Mixin handler for object of type "
f"{fully_qualified_name} "
"did not return a valid mixin column"
)
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif "info" in getattr(data, "__dict__", ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute. If not copying, take a slice
# to ensure we get a new instance and we do not share metadata
# like info.
col = col_copy(data, copy_indices=self._init_indices) if copy else data[:]
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more likely
# it is just MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, "dtype"):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = (
masked_col_cls
if isinstance(data, np.ma.MaskedArray)
else self.ColumnClass
)
else:
col_cls = self.ColumnClass
try:
col = col_cls(
name=name,
data=data,
dtype=dtype,
copy=copy,
copy_indices=self._init_indices,
)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError("unable to convert data to Column for Table")
col = self._convert_col_for_table(col)
return col
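# Editorial sketch (not part of the astropy source): the name precedence and
# copy semantics described in the docstring above can be seen from the public
# API, assuming the usual ``from astropy.table import Table, Column`` imports:
#
#     >>> c = Column([1, 2], name='orig')
#     >>> t = Table()
#     >>> t.add_column(c, name='renamed')   # explicit name wins over c.info.name
#     >>> t.colnames
#     ['renamed']
#     >>> t2 = Table([c], copy=False)       # copy=False: column shares data with c
#     >>> t2['orig'][0] = 99
#     >>> int(c[0])
#     99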
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array."""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = (
[data[name] for name in data_names]
if struct
else [data[:, i] for i in range(n_cols)]
)
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns."""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For an unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects."""
lengths = {len(col) for col in cols}
if len(lengths) > 1:
raise ValueError(f"Inconsistent data column lengths: {lengths}")
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. After pickling or when initializing from an existing
# table, column indices that had been references to a single index object may
# have been *copied* into independent objects. This results in duplicates
# which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
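# Editorial sketch: the length check above is what users hit when constructing
# a table from ragged inputs (output abbreviated):
#
#     >>> Table([[1, 2, 3], [1, 2]])
#     Traceback (most recent call last):
#     ...
#     ValueError: Inconsistent data column lengths: ...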
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(
table, newcols, verify=False, names=self.columns.keys()
)
return table
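# Editorial sketch: because _new_from_slice references the parent data, a basic
# slice of a table is a view, whereas fancy indexing (lists/arrays of indices)
# copies the data:
#
#     >>> t = Table({'a': [1, 2, 3]})
#     >>> t2 = t[1:3]          # referenced slice
#     >>> t2['a'][0] = 99
#     >>> int(t['a'][1])       # parent sees the change
#     99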
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
# case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError("Cannot have None for column name")
if len(set(names)) != len(names):
raise ValueError("Duplicate column names")
table.columns = table.TableColumns(
(name, col) for name, col in zip(names, cols)
)
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, "mask"):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(
self,
html=False,
descr_vals=None,
max_width=None,
tableid=None,
show_dtype=True,
max_lines=None,
tableclass=None,
):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append(f"length={len(self)}")
descr = " ".join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f"<i>{xml_escape(descr)}</i>\n"
else:
descr = f"<{descr}>\n"
if tableid is None:
tableid = f"table{id(self)}"
data_lines, outs = self.formatter._pformat_table(
self,
tableid=tableid,
html=html,
max_width=max_width,
show_name=True,
show_unit=None,
show_dtype=show_dtype,
max_lines=max_lines,
tableclass=tableclass,
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(
html=True, max_width=-1, tableclass=conf.default_notebook_table_class
)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f"<div>{out}</div>"
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return "\n".join(self.pformat())
def __bytes__(self):
return str(self).encode("utf-8")
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values; use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
return any(hasattr(col, "mask") and np.any(col.mask) for col in self.itercols())
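# Editorial sketch of the distinction between the two properties above:
#
#     >>> t = Table({'a': np.ma.array([1, 2], mask=[False, False])})
#     >>> t.has_masked_columns    # column is a MaskedColumn ...
#     True
#     >>> t.has_masked_values     # ... but no element is actually masked
#     False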
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
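# Editorial sketch: Quantity is deliberately excluded above because in a plain
# Table it becomes a Column with ``unit`` set, while e.g. Time stays a mixin
# (assumes ``import astropy.units as u`` and ``from astropy.time import Time``):
#
#     >>> t = Table({'q': [1, 2] * u.m, 't': Time([2000.0, 2001.0], format='jyear')})
#     >>> type(t['q']).__name__
#     'Column'
#     >>> t.has_mixin_columns     # True because of the Time column
#     True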
@format_doc(_pprint_docs)
def pprint(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(
max_lines, max_width, show_name, show_unit, show_dtype, align
)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()), copy=False)
else:
return self
def show_in_notebook(
self,
tableid=None,
css=None,
display_length=50,
table_class="astropy-default",
show_row_index="idx",
):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from IPython.display import HTML
from .jsviewer import JSViewer
if tableid is None:
tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}"
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == "astropy-default":
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(
html=True,
max_width=-1,
tableid=tableid,
max_lines=-1,
show_dtype=False,
tableclass=table_class,
)
columns = display_table.columns.values()
sortable_columns = [
i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc"
]
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(
self,
max_lines=5000,
jsviewer=False,
browser="default",
jskwargs={"use_local_files": True},
tableid=None,
table_class="display compact",
css=None,
show_row_index="idx",
):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import tempfile
import webbrowser
from urllib.parse import urljoin
from urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "table.html")
with open(path, "w") as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(
tmp,
format="jsviewer",
css=css,
max_lines=max_lines,
jskwargs=jskwargs,
table_id=tableid,
table_class=table_class,
)
else:
self.write(tmp, format="html")
try:
br = webbrowser.get(None if browser == "default" else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin("file:", pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
tableid=tableid,
tableclass=tableclass,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
return lines
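# Editorial usage sketch: pformat returns the same lines that pprint prints,
# which is handy for logging or custom output:
#
#     >>> t = Table({'a': [1, 2]})
#     >>> lines = t.pformat()
#     >>> print('\n'.join(lines))   # equivalent to ``print(t)``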
@format_doc(_pformat_docs, id="{id}")
def pformat_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(
max_lines,
max_width,
show_name,
show_unit,
show_dtype,
html,
tableid,
align,
tableclass,
)
def more(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__(
[self[x] for x in item], copy_indices=self._copy_indices
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif (isinstance(item, np.ndarray) and item.size == 0) or (
isinstance(item, (tuple, list)) and not item
):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (
not getattr(self, "_setitem_inplace", False)
and not conf.replace_inplace
):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError(
"Right side value needs {} elements (one for each column)".format(
n_cols
)
)
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif isinstance(item, (list, tuple, np.ndarray)) and all(
isinstance(x, str) for x in item
):
self.remove_columns(item)
elif (
isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i"
):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError("illegal key or index value")
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception(
"Masked attribute is read-only (use t = Table(t, masked=True)"
" to convert to a masked table)"
)
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings."""
return (
isinstance(names, (tuple, list))
and names
and all(isinstance(x, str) for x in names)
)
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def __or__(self, other):
if isinstance(other, Table):
updated_table = self.copy()
updated_table.update(other)
return updated_table
else:
return NotImplemented
def __ior__(self, other):
try:
self.update(other)
return self
except TypeError:
return NotImplemented
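# Editorial sketch of the dict-style merge operators defined above (both
# delegate to ``update()``):
#
#     >>> t1 = Table({'a': [1, 2]})
#     >>> t2 = Table({'b': [3, 4]})
#     >>> t3 = t1 | t2      # merged copy
#     >>> t1 |= t2          # in-place update of t1
#     >>> t3.colnames
#     ['a', 'b']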
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(
self,
col,
index=None,
name=None,
rename_duplicate=False,
copy=True,
default_name=None,
):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
in the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f"col{len(self.columns)}"
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(
col, name=name, copy=copy, default_name=default_name
)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError("Empty table cannot have column set to scalar value")
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, "shape", ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape, subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape, subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError("Inconsistent data column lengths")
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + "_" + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(
self, cols, indexes=None, names=None, copy=True, rename_duplicate=False
):
"""
Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError("Number of indexes must match number of cols")
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError("Number of names must match number of cols")
default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes, kind="stable")):
self.add_column(
cols[ii],
index=indexes[ii],
name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate,
copy=copy,
)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
# sys.getrefcount is CPython specific and not on PyPy.
if (
"refcount" in warns
and name in self.colnames
and hasattr(sys, "getrefcount")
):
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if "always" in warns:
warnings.warn(
f"replaced column '{name}'", TableReplaceWarning, stacklevel=3
)
if "slice" in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = (
"replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
# sys.getrefcount is CPython specific and not on PyPy.
if "refcount" in warns and hasattr(sys, "getrefcount"):
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = (
"replaced column '{}' and the number of references "
"to the column changed.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if "attributes" in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = "replaced column '{}' and column attributes {} changed.".format(
name, changed_attrs
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
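# Editorial sketch: the warning categories checked above come from the
# ``astropy.table.conf.replace_warnings`` configuration item. For instance,
# with the 'always' category enabled, an ordinary ``t['a'] = ...`` assignment
# (which routes through this method) emits a TableReplaceWarning:
#
#     >>> from astropy import table
#     >>> with table.conf.set_temp('replace_warnings', ['always']):
#     ...     t = Table({'a': [1, 2]})
#     ...     t['a'] = [3.0, 4.0]    # TableReplaceWarning: replaced column 'a'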
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f"column name {name} is not in the table")
if self[name].info.indices:
raise ValueError("cannot replace a table index column")
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError("length of new column must match table length")
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (default to all columns if no names provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f"{name} is not a valid column name")
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f"columns {invalid_names} do not exist")
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
"""
for name in self._set_of_names_in_colnames(names):
del self.columns[name]
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
encode_decode_func : callable
Function applied with UTF-8 encoding (e.g. ``np.char.encode`` or
``np.char.decode``) when a direct dtype cast fails.
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, "utf-8"))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in (
col.info.attr_names - col.info._attrs_no_copy - {"dtype"}
):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
"""
self._convert_string_dtype("S", "U", np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype("U", "S", np.char.encode)
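# Editorial round-trip sketch for the two conversion methods above:
#
#     >>> t = Table({'s': np.array(['abc', 'def'], dtype='S3')})
#     >>> t['s'].dtype.kind
#     'S'
#     >>> t.convert_bytestring_to_unicode()
#     >>> t['s'].dtype.kind
#     'U'
#     >>> t.convert_unicode_to_bytestring()
#     >>> t['s'].dtype.kind
#     'S'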
def keep_columns(self, names):
"""
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
"""
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
del self.columns[colname]
def rename_column(self, name, new_name):
"""
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
"""
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
"""
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
"""
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError(
"input 'new_names' must be a tuple or a list of column names"
)
if len(names) != len(new_names):
raise ValueError(
"input 'names' and 'new_names' list arguments must be the same length"
)
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError(
"right hand side must be a sequence of values with "
"the same length as the number of selected columns"
)
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
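# Editorial sketch: the revert above makes row assignment roughly transactional.
# If setting a later column fails, values already set in that row are restored
# (hypothetical example; exact exception text depends on numpy):
#
#     >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
#     >>> t[0] = (10, 'not-a-float')   # fails while setting column 'b' ...
#     Traceback (most recent call last):
#     ...
#     ValueError: ...
#     >>> int(t['a'][0])               # ... so column 'a' keeps its original value
#     1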
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError(
f"Index {index} is out of bounds for table with length {N}"
)
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError("keys in mask should match keys in vals")
if vals and any(name not in colnames for name in vals):
raise ValueError("Keys in vals must all be valid column names")
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, "dtype"):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError("Mismatch between number of vals and columns")
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError("Mismatch between number of masks and columns")
else:
mask = [False] * len(self.columns)
else:
raise TypeError("Vals must be an iterable or mapping or None")
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if (
mask_
and isinstance(col, Column)
and not isinstance(col, MaskedColumn)
):
col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError(
"Incorrect length for column {} after inserting {}"
" (expected {}, got {})".format(name, val, len(newcol), N + 1)
)
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, "mask"):
newcol[index] = np.ma.masked
else:
raise TypeError(
"mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name)
)
columns[name] = newcol
except Exception as err:
raise ValueError(
"Unable to insert row because of exception in column '{}':\n{}".format(
name, err
)
) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
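# Editorial usage sketch for insert_row (see also the add_row examples above):
#
#     >>> t = Table({'a': [1, 3], 'b': ['x', 'z']})
#     >>> t.insert_row(1, {'a': 2, 'b': 'y'})   # new row goes before index 1
#     >>> t['a'].tolist()
#     [1, 2, 3]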
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``. If ``other`` is a
|Table| instance then ``|=`` is available as alternate syntax for in-place
update and ``|`` can be used to merge data into a new table.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts="silent")
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for columns like Time that are sortable in principle but get stored as
# object arrays.
if len(keys) > 1:
kwargs["order"] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs["kind"] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
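# Editorial usage sketch: argsort returns indices rather than sorting in place,
# so the same ordering can be applied to the table or reused elsewhere:
#
#     >>> t = Table({'a': [2, 1, 3], 'b': ['y', 'x', 'z']})
#     >>> idx = t.argsort('a')
#     >>> t[idx]['b'].tolist()
#     ['x', 'y', 'z']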
def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode("freeze"):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int, dict
Number of decimals to round the columns to. If a dict is given,
each key is a column name and its value is the number of decimals
for that column. Columns not present in the dict are left unchanged.
"""
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
"""
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
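Examples
--------
A minimal, illustrative example (with ``copy_data=True`` the copy does not
share data with the original)::
>>> from astropy.table import Table
>>> t = Table({'a': [1, 2]})
>>> t2 = t.copy()
>>> t2['a'][0] = 99
>>> print(t['a'][0], t2['a'][0])
1 99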
"""
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, "_groups"):
out._groups = groups.TableGroups(
out, indices=self._groups._indices, keys=self._groups._keys
)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of this table with any other object.
This is the actual implementation of ``__eq__``.
Returns a 1-D boolean numpy array showing the result of the row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with another::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing the result of the comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with another::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError("cannot compare tables with different column names")
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
eq = self[name] == other[name]
if (
warns
and issubclass(warns[-1].category, FutureWarning)
and "elementwise comparison failed" in str(warns[-1].message)
):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f"unable to compare column {name}") from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (
isinstance(eq, np.ndarray)
and eq.dtype is np.dtype("bool")
and len(eq) == len(self)
):
raise TypeError(
f"comparison for column {name} returned {eq} "
"instead of the expected boolean ndarray"
)
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``.
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
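Examples
--------
A minimal, illustrative example (grouping on a single key column with
arbitrary values)::
>>> from astropy.table import Table
>>> t = Table({'a': ['x', 'y', 'x'], 'b': [1, 2, 3]})
>>> tg = t.group_by('a')
>>> len(tg.groups)
2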
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance.
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError(
"index must be None, False, True or a table column name"
)
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.time import TimeBase, TimeDelta
from . import serialize
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = (
col_copy(col, copy_indices=False) if col.info.indices else col
)
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype("timedelta64[ns]")
nat = np.timedelta64("NaT")
else:
new_col = col.datetime64.copy()
nat = np.datetime64("NaT")
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
# fmt: off
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)'
)
# fmt: on
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, "isnative", True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder("=")
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ["i", "u"]:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace("i", "I").replace("u", "U")
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to"
f" {out[name].dtype}",
TableReplaceWarning,
stacklevel=3,
)
elif column.dtype.kind not in ["f", "c"]:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs["index"] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance.
In addition to converting generic numeric or string columns, this supports
conversion of pandas datetime and timedelta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
units : dict
A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or "index"
while index_name in names:
index_name = "_" + index_name + "_"
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f"`units` contains additional columns: {not_found}")
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ["u", "i"] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(
data=data, name=name, mask=mask, unit=unit, copy=False
)
continue
if data.dtype.kind == "O":
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b""
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == "M":
from astropy.time import Time
out[name] = Time(data, format="datetime64")
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = "isot"
# Numpy timedelta64
elif data.dtype.kind == "m":
from astropy.time import TimeDelta
data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format="sec")
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
For more information see:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
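Examples
--------
A minimal, illustrative example (a column with a unit is stored as a
`~astropy.units.Quantity`)::
>>> import astropy.units as u
>>> from astropy.table import QTable
>>> t = QTable({'v': [1.0, 2.0] * u.m / u.s})
>>> isinstance(t['v'], u.Quantity)
True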
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, "unit", None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(
f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning,
)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
22e0e644bcfa5eadbe3c3eb55abbf01c7470031a525e1fb7070a148d8d7dd09d
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import Fittable1DModel, Fittable2DModel
from .parameters import InputParameterError, Parameter
from .utils import ellipse_extent
__all__ = [
"AiryDisk2D",
"Moffat1D",
"Moffat2D",
"Box1D",
"Box2D",
"Const1D",
"Const2D",
"Ellipse2D",
"Disk2D",
"Gaussian1D",
"Gaussian2D",
"Linear1D",
"Lorentz1D",
"RickerWavelet1D",
"RickerWavelet2D",
"RedshiftScaleFactor",
"Multiply",
"Planar2D",
"Scale",
"Sersic1D",
"Sersic2D",
"Shift",
"Sine1D",
"Cosine1D",
"Tangent1D",
"ArcSine1D",
"ArcCosine1D",
"ArcTangent1D",
"Trapezoid1D",
"TrapezoidDisk2D",
"Ring2D",
"Voigt1D",
"KingProjectedAnalytic1D",
"Exponential1D",
"Logarithmic1D",
]
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Gaussian"
)
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Standard deviation of the Gaussian",
)
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev**2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mean": inputs_unit[self.inputs[0]],
"stddev": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
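Examples
--------
A minimal, illustrative example (the model evaluates to the amplitude at
the mean position)::
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(amplitude=2, x_mean=0, y_mean=0, x_stddev=1, y_stddev=1)
>>> print(g(0, 0))
2.0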
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(
default=0, description="Peak position (along x axis) of Gaussian"
)
y_mean = Parameter(
default=0, description="Peak position (along y axis) of Gaussian"
)
x_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along x axis)"
)
y_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along y axis)"
)
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"
),
)
def __init__(
self,
amplitude=amplitude.default,
x_mean=x_mean.default,
y_mean=y_mean.default,
x_stddev=None,
y_stddev=None,
theta=None,
cov_matrix=None,
**kwargs,
):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError(
"Cannot specify both cov_matrix and x/y_stddev/theta"
)
# Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault("bounds", {})
kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None))
kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude,
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
**kwargs,
)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``.
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-5.5, upper=5.5)
y: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-2.0, upper=2.0)
y: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
dx, dy = ellipse_extent(a, b, self.theta)
return (
(self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx),
)
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function."""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(
-((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2))
)
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters."""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2.0 * theta)
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xstd3 = x_stddev**3
ystd3 = y_stddev**3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff**2
ydiff2 = ydiff**2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2)))
da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff))
dg_dx_stddev = g * (
-(
da_dx_stddev * xdiff2
+ db_dx_stddev * xdiff * ydiff
+ dc_dx_stddev * ydiff2
)
)
dg_dy_stddev = g * (
-(
da_dy_stddev * xdiff2
+ db_dy_stddev * xdiff * ydiff
+ dc_dy_stddev * ydiff2
)
)
dg_dtheta = g * (
-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)
)
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_mean": inputs_unit[self.inputs[0]],
"y_mean": inputs_unit[self.inputs[0]],
"x_stddev": inputs_unit[self.inputs[0]],
"y_stddev": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
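Examples
--------
A minimal, illustrative example::
>>> from astropy.modeling.models import Shift
>>> s = Shift(offset=2.0)
>>> print(s(1.0))
3.0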
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function."""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.offset) for x in self.bounding_box
)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function."""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model."""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter."""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"offset": outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
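Examples
--------
A minimal, illustrative example::
>>> from astropy.modeling.models import Scale
>>> s = Scale(factor=3.0)
>>> print(s(2.0))
6.0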
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function."""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function."""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
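Examples
--------
A minimal, illustrative example::
>>> from astropy.modeling.models import RedshiftScaleFactor
>>> m = RedshiftScaleFactor(z=1.0)
>>> print(m(500.0))
1000.0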
"""
z = Parameter(description="Redshift", default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function."""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative."""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model."""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()
)
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)
)
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_eff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1.0 / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": inputs_unit[self.inputs[0]] ** -1,
"amplitude": outputs_unit[self.outputs[0]],
}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative."""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (
TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine."""
return ArcSine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative."""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = -(
TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine."""
return ArcCosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
Note that the tangent function is undefined for inputs of the form
pi/2 + n*pi for all integers n. Thus the default bounding box
has been restricted to:
.. math:: [(-1/4 - p)/f, (1/4 - p)/f]
which spans exactly one continuous branch of the tangent function.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative."""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent."""
return ArcTangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
bbox = [
(-1 / 4 - self.phase) / self.frequency,
(1 / 4 - self.phase) / self.frequency,
]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models.
"""
@property
def input_units(self):
if self.amplitude.unit is None:
return None
return {self.inputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": outputs_unit[self.outputs[0]] ** -1,
"amplitude": inputs_unit[self.inputs[0]],
}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((\\arcsin(x / A) / (2 \\pi)) - p) / f
The arcsin function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine."""
return Sine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((\\arccos(x / A) / (2 \\pi)) - p) / f
The arccos function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative."""
d_amplitude = x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcCosine."""
return Cosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcTangent1D(_InverseTrigonometric1D):
"""
One dimensional ArcTangent model returning values between -pi/2 and
pi/2 only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Tangent
frequency : float
Oscillation frequency for corresponding Tangent
phase : float
Oscillation phase for corresponding Tangent
See Also
--------
Tangent1D, ArcSine1D, ArcCosine1D
Notes
-----
Model formula:
.. math:: f(x) = ((\\arctan(x / A) / (2 \\pi)) - p) / f
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcTangent1D
plt.figure()
s1 = ArcTangent1D(amplitude=1, frequency=.25)
r=np.arange(-10, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-10, 10, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_tan = np.arctan(argument) / TWOPI
return (arc_tan - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of ArcTangent."""
return Tangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
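Examples
--------
A minimal, illustrative example::
>>> from astropy.modeling.models import Linear1D
>>> line = Linear1D(slope=2, intercept=1)
>>> print(line(3))
7.0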
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function."""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters."""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope**-1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit[self.outputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
}
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
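Examples
--------
A minimal, illustrative example::
>>> from astropy.modeling.models import Planar2D
>>> plane = Planar2D(slope_x=1, slope_y=2, intercept=3)
>>> print(plane(1, 1))
6.0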
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function."""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters."""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit["z"],
"slope_x": outputs_unit["z"] / inputs_unit["x"],
"slope_y": outputs_unit["z"] / inputs_unit["y"],
}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
    The input ``x``, the position ``x_0``, and the ``fwhm`` must either all be provided
    with compatible units or all be unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
    where :math:`\\gamma` is half of the given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function."""
return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters."""
d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2)
d_x_0 = (
amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2)
)
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
    method : str, optional
        Algorithm for computing the complex error function; one of
        'Humlicek2' (default; fast and generally accurate to better than
        ``rtol=3.e-5``) or 'Scipy' (alias 'wofz'; requires ``scipy``, almost
        as fast and serves as the accuracy reference).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
    The input ``x``, the position ``x_0``, and the ``fwhm_*`` parameters must either all be
    provided with compatible units or all be unitless numbers.
    The Voigt function is calculated as the real part of the complex error function, computed
    either from Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
    Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module), or from
    `~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0, description="Position of the peak")
amplitude_L = Parameter(default=1, description="The Lorentzian amplitude")
fwhm_L = Parameter(
default=2 / np.pi, description="The Lorentzian full width at half maximum"
)
fwhm_G = Parameter(
default=np.log(2), description="The Gaussian full width at half maximum"
)
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(
self,
x_0=x_0.default,
amplitude_L=amplitude_L.default,
fwhm_L=fwhm_L.default,
fwhm_G=fwhm_G.default,
method="humlicek2",
**kwargs,
):
if str(method).lower() in ("wofz", "scipy"):
from scipy.special import wofz
self._faddeeva = wofz
elif str(method).lower() == "humlicek2":
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(
f"Not a valid method for Voigt1D Faddeeva function: {method}."
)
self.method = self._faddeeva.__name__
super().__init__(
x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs
)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`.
"""
if z.shape == self._last_z.shape and np.allclose(
z, self._last_z, rtol=1.0e-14, atol=1.0e-15
):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""
Derivative of the one dimensional Voigt function with respect to parameters.
"""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [
-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G,
]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm_L": inputs_unit[self.inputs[0]],
"fwhm_G": inputs_unit[self.inputs[0]],
"amplitude_L": outputs_unit[self.outputs[0]],
}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z = x + iy) combining Humlicek's rational approximations.
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
# fmt: off
AA = np.array(
[
+46236.3358828121, -147726.58393079657j,
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j,
]
) # 1j/sqrt(pi) to the 12. digit
bb = np.array(
[
+7918.06640624997,
-126689.0625,
+295607.8125,
-236486.25,
+84459.375,
-15015.0,
+1365.0,
-60.0,
+1.0,
]
)
# fmt: on
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
# fmt: off
# Recursive algorithms for the polynomials in Z with coefficients AA, bb
# numer = 0.0
# for A in AA[::-1]:
# numer = numer * Z + A
# Explicitly unrolled above loop for speed
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
# denom = 0.0
# for b in bb[::-1]:
# denom = denom * ZZ + b
# Explicitly unrolled above loop for speed
denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ +
bb[2])*ZZ + bb[1])*ZZ + bb[0]
# fmt: on
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters."""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
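    Examples
    --------
    A minimal usage sketch (the amplitude value is illustrative)::
        import numpy as np
        from astropy.modeling.models import Const2D
        const = Const2D(amplitude=5.)
        y, x = np.mgrid[0:3, 0:3]
        z = const(x, y)  # an array shaped like x, filled with 5.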
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
        e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, angle=theta.degree,
                              edgecolor='red', facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse", mag=True)
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"a": inputs_unit[self.inputs[0]],
"b": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
    x_0 : float
        x position of the center of the disk
    y_0 : float
        y position of the center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
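    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Disk2D
        disk = Disk2D(amplitude=1., x_0=12., y_0=12., R_0=5.)
        y, x = np.mgrid[0:25, 0:25]
        img = disk(x, y)  # 1 inside the radius-5 disk, 0 outside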
"""
amplitude = Parameter(default=1, description="Value of disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0**2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return (
(self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0),
)
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
    amplitude : float
        Value of the ring function
    x_0 : float
        x position of the center of the ring
    y_0 : float
        y position of the center of the ring
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
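    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Ring2D
        ring = Ring2D(amplitude=1., x_0=20., y_0=20., r_in=5., width=3.)
        y, x = np.mgrid[0:40, 0:40]
        img = ring(x, y)  # 1 for radii between 5 and 8, 0 elsewhere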
"""
    amplitude = Parameter(default=1, description="Value of the ring function", mag=True)
    x_0 = Parameter(default=0, description="X position of the center of the ring")
    y_0 = Parameter(default=0, description="Y position of the center of the ring")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(
self,
amplitude=amplitude.default,
x_0=x_0.default,
y_0=y_0.default,
r_in=None,
width=None,
r_out=None,
**kwargs,
):
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_in": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A", mag=True)
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function."""
inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
    ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
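    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Box2D
        box = Box2D(amplitude=1., x_0=10., y_0=10., x_width=4., y_width=2.)
        y, x = np.mgrid[0:20, 0:20]
        img = box(x, y)  # 1 inside the 4 x 2 rectangle centered on (10, 10)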
"""
amplitude = Parameter(default=1, description="Amplitude", mag=True)
x_0 = Parameter(
default=0, description="X position of the center of the box function"
)
y_0 = Parameter(
default=0, description="Y position of the center of the box function"
)
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function."""
x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0)
y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[1]],
"x_width": inputs_unit[self.inputs[0]],
"y_width": inputs_unit[self.inputs[1]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function."""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.0
x3 = x_0 + width / 2.0
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
    ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
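    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import TrapezoidDisk2D
        trap = TrapezoidDisk2D(amplitude=2., x_0=25., y_0=25., R_0=10., slope=0.5)
        y, x = np.mgrid[0:50, 0:50]
        img = trap(x, y)  # flat top out to R_0, then a linear ramp down to zero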
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(
default=1, description="Slope of tails of trapezoid in x direction"
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function."""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit["x"] != inputs_unit["y"]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function."""
xx_ww = (x - x_0) ** 2 / (2 * sigma**2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
        + \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
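    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import RickerWavelet2D
        hat = RickerWavelet2D(amplitude=1., x_0=0., y_0=0., sigma=2.)
        y, x = np.mgrid[-10:11, -10:11]
        img = hat(x, y)  # central peak surrounded by a shallow negative ring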
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function."""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2)
return amplitude * (1 - rr_ww) * np.exp(-rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
    1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
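    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative; evaluation
    requires ``scipy``)::
        import numpy as np
        from astropy.modeling.models import AiryDisk2D
        airy = AiryDisk2D(amplitude=1., x_0=12., y_0=12., radius=4.)
        y, x = np.mgrid[0:25, 0:25]
        img = airy(x, y)  # peak at (12, 12), first zero at r = 4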
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Airy function"
)
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(
default=1,
description="The radius of the Airy disk (radius of first zero crossing)",
)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function."""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"radius": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function."""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters."""
fac = 1 + (x - x_0) ** 2 / gamma**2
d_A = fac ** (-alpha)
d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2)
d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3)
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
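    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Moffat2D
        psf = Moffat2D(amplitude=1., x_0=12., y_0=12., gamma=2., alpha=3.5)
        y, x = np.mgrid[0:25, 0:25]
        img = psf(x, y)
        width = psf.fwhm  # FWHM implied by gamma and alpha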
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(
default=0, description="X position of the maximum of the Moffat model"
)
y_0 = Parameter(
default=0, description="Y position of the maximum of the Moffat model"
)
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg))
d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{
-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]
\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
bn = cls._gammaincinv(2.0 * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_eff": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
    It usually works for models with a concentration parameter (c = log10(r_t/r_c)) smaller than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}")
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Amplitude or scaling factor",
)
r_core = Parameter(
default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius"
)
r_tide = Parameter(
default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius"
)
@property
def concentration(self):
"""Concentration parameter of the king model."""
return np.log10(np.abs(self.r_tide / self.r_core))
@staticmethod
def _core_func(x, r_core, r_tide, power=1):
return (
1.0 / np.sqrt(x**2 + r_core**2) ** power
- 1.0 / np.sqrt(r_tide**2 + r_core**2) ** power
)
@staticmethod
def _filter(x, r_tide, result):
"""Set invalid r values to 0"""
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.0
def evaluate(self, x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core**2 * self._core_func(x, r_core, r_tide) ** 2
self._filter(x, r_tide, result)
return result
def fit_deriv(self, x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core**2 * self._core_func(x, r_core, r_tide) ** 2
self._filter(x, r_tide, d_amplitude)
d_r_core = (
-2.0
* amplitude
* r_core**3
* self._core_func(x, r_core, r_tide, power=3)
* self._core_func(x, r_core, r_tide)
+ 2 * amplitude * r_core * self._core_func(x, r_core, r_tide) ** 2
)
self._filter(x, r_tide, d_r_core)
d_r_tide = (
2 * amplitude * r_core**2 * r_tide * self._core_func(x, r_core, r_tide)
) / (r_core**2 + r_tide**2) ** (3 / 2)
self._filter(x, r_tide, d_r_tide)
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_core": inputs_unit[self.inputs[0]],
"r_tide": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
    amplitude : float, optional
        Scale factor for the model.
    tau : float, optional
        Scale of the input; the model evaluates to ``amplitude * np.log(x / tau)``.
See Also
--------
Exponential1D, Gaussian1D
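    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Logarithmic1D
        log_model = Logarithmic1D(amplitude=2., tau=1.)
        x = np.linspace(0.1, 10, 50)
        y = log_model(x)  # y = 2 * ln(x / 1)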
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
    amplitude : float, optional
        Scale factor for the model.
    tau : float, optional
        E-folding scale; the model evaluates to ``amplitude * np.exp(x / tau)``.
See Also
--------
Logarithmic1D, Gaussian1D
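    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Exponential1D
        exp_model = Exponential1D(amplitude=1., tau=-2.)
        x = np.linspace(0, 10, 50)
        y = exp_model(x)  # y = exp(-x / 2), a decaying exponential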
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
"""tau cannot be 0."""
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
3736ad7fb53e9e3878831989e1fa82d30ef16f79f999e0cddf139aeb0b638d50
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity.
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities.
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
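    Examples
    --------
    A minimal sketch of flat iteration (the values are illustrative)::
        import astropy.units as u
        q = [[1., 2.], [3., 4.]] * u.m
        for item in q.flat:       # each item is a scalar Quantity in m
            print(item)
        q.flat[1] = 5 * u.cm      # assignment converts to the parent's unit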
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
"""
# Need to set a class-level default for _equivalencies, or
    # Constants cannot initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
        As a classmethod, the type is the class, i.e. ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
            Unit specification, which can also be the physical type (i.e. str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `typing_extensions.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* if python v3.9+ : `typing.Annotated`
* if :mod:`typing_extensions` is installed : `typing_extensions.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
# LOCAL
from ._typing import HAS_ANNOTATED, Annotated
# process whether [unit] or [unit, shape, ptype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
# Allow to sort of work for python 3.8- / no typing_extensions
# instead of bailing out, return the unit for `quantity_input`
if not HAS_ANNOTATED:
warnings.warn(
"Quantity annotations are valid static type annotations only"
" if Python is v3.9+ or `typing_extensions` is installed."
)
return unit
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated.__class_getitem__((cls, unit))
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
                # the second part adds a possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# check that array contains numbers or long int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
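    # A few of the construction paths handled above, as an illustrative
    # sketch (assuming ``import numpy as np`` and ``import astropy.units as u``):
    #   >>> u.Quantity(1, u.m)                    # number plus unit
    #   <Quantity 1. m>
    #   >>> u.Quantity("1.5 km")                  # parsed from a string
    #   <Quantity 1.5 km>
    #   >>> u.Quantity([1 * u.m, 2 * u.cm]).unit  # sequence converted to one unit
    #   Unit("m")
    #   >>> u.Quantity(np.arange(3), u.s).dtype   # integers upcast to float
    #   dtype('float64')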
def __array_finalize__(self, obj):
        # Check whether super().__array_finalize__ should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
            # Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError, AttributeError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
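    # Illustrative sketch of the ufunc handling above (assuming the usual
    # ``import numpy as np`` and ``import astropy.units as u``):
    #   >>> np.add(1. * u.m, 1. * u.cm)      # second input converted to m
    #   <Quantity 1.01 m>
    #   >>> np.sin(90. * u.deg)              # input converted to radian
    #   <Quantity 1.>
    #   >>> np.less(1. * u.m, 200. * u.cm)   # comparisons give plain booleans
    #   True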
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return (
result if unit is None else self._new_view(result, unit, finalize=False)
)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None, finalize=True):
"""Create a Quantity view of some array-like input, and set the unit.
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
finalize : bool, optional
Whether to call ``__array_finalize__`` to transfer properties from
``self`` to the new view of ``obj`` (e.g., ``info`` for all
subclasses, or ``_wrap_angle`` for `~astropy.coordinates.Latitude`).
Default: `True`, as appropriate for, e.g., unit conversions or slicing,
where the nature of the object does not change.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call takes only about
        # double the time of the check ``obj.__class__ is np.ndarray``, so it
        # is not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
if finalize:
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
            # Standard path, let the unit do the work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See Also
--------
to_value : get the numerical value in a given unit.
"""
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
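    # Illustrative example (assuming ``import astropy.units as u``):
    #   >>> (1. * u.km).to(u.m)
    #   <Quantity 1000. m>
    #   >>> (500. * u.nm).to(u.Hz, equivalencies=u.spectral())  # needs equivalencies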
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See Also
--------
to : Get a new instance in a different unit.
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
        # Index with empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
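    # Sketch of the short-cut above (illustrative; assumes ``import numpy as np``
    # and ``import astropy.units as u``): for a matching unit the result is a
    # view of the data, otherwise a scaled copy.
    #   >>> q = np.arange(3.) * u.m
    #   >>> np.shares_memory(q.to_value(u.m), q)   # no conversion -> view
    #   True
    #   >>> np.shares_memory(q.to_value(u.km), q)  # scaled -> new array
    #   False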
value = property(
to_value,
doc="""The numerical value of this instance.
        See Also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
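    # Illustrative examples of the scaling noted above:
    #   >>> (1. * u.km).si
    #   <Quantity 1000. m>
    #   >>> (1. * u.m).cgs
    #   <Quantity 100. cm>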
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
    # This flag controls whether convenience conversion members, such
    # as `q.m` being equivalent to `q.to_value(u.m)`, are available. It is
    # not enabled on Quantity itself, but is on some subclasses of
    # Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
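    # Illustrative behaviour of the comparisons above:
    #   >>> (100. * u.cm) == (1. * u.m)   # converted before comparing
    #   True
    #   >>> (1. * u.m) == (1. * u.s)      # incompatible units -> not equal
    #   False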
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
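    # Sketch of the ``<<`` conversion operator defined above (illustrative):
    #   >>> q = 1. * u.km
    #   >>> q << u.m                      # new Quantity in m
    #   <Quantity 1000. m>
    #   >>> q <<= u.m                     # in-place: data scaled, unit replaced
    #   >>> q
    #   <Quantity 1000. m>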
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), other * self.unit, finalize=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), self.unit / other, finalize=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1.0 / self.value, other / self.unit, finalize=False)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(
self.value ** float(other), self.unit**other, finalize=False
)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(
self.view(np.ndarray)[key], self.unit[key], finalize=False
)
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""This method raises ValueError, since truthiness of quantities is ambiguous,
especially for logarithmic units and temperatures. Use explicit comparisons.
"""
raise ValueError(
f"{type(self).__name__} truthiness is ambiguous, especially for logarithmic units"
" and temperatures. Use explicit comparisons."
)
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it is not
            possible to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
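    # Illustrative example (assuming ``import astropy.units as u``):
    #   >>> (1. * u.N).decompose()
    #   <Quantity 1. kg m / s2>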
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it is not
            possible to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
"""Not implemented, use ``.value.tostring()`` instead."""
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
"""Not implemented, use ``.value.tobytes()`` instead."""
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
"""Not implemented, use ``.value.tofile()`` instead."""
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
"""Not implemented, use ``.value.dump()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
"""Not implemented, use ``.value.dumps()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result: `~astropy.units.Quantity`, `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
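    # Illustrative example of the dispatch above (``np.concatenate`` is in
    # FUNCTION_HELPERS, so inputs are first converted to a common unit;
    # assumes the usual ``np``/``u`` imports):
    #   >>> np.concatenate([[1., 2.] * u.m, [300., 400.] * u.cm])
    #   <Quantity [1., 2., 3., 4.] m>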
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
    :class:`~astropy.coordinates.Distance`.
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
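# A minimal sketch (illustrative only, not part of this module) of how a
# subclass of SpecificTypeQuantity can be defined. The class name
# ``Wavelength`` and the alias ``u`` for `astropy.units` are assumptions made
# for demonstration:
#
#     >>> import astropy.units as u
#     >>> class Wavelength(u.SpecificTypeQuantity):
#     ...     _equivalent_unit = _default_unit = _unit = u.m
#     >>> Wavelength(500 * u.nm)   # works: nm is equivalent to the length unit
#     >>> Wavelength(10 * u.s)     # raises UnitTypeError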
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
allclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
isclose
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
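# A short illustrative use of the Quantity-aware helpers above (values are
# arbitrary; ``u`` is assumed to be an alias for `astropy.units`):
#
#     >>> import astropy.units as u
#     >>> from astropy.units import allclose, isclose
#     >>> allclose([1000., 2000.] * u.m, [1., 2.] * u.km)   # -> True
#     >>> isclose(1. * u.m, 101. * u.cm, atol=2 * u.cm)     # -> True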
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
7d5a79087c536fec9ae67a698f1859c69a5b8902678a62f935c059102ab45333 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
import json
import socket
import urllib.error
import urllib.parse
import urllib.request
from warnings import warn
import erfa
import numpy as np
from astropy import constants as consts
from astropy import units as u
from astropy.units.quantity import QuantityInfoBase
from astropy.utils import data
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle, Latitude, Longitude
from .errors import UnknownSiteException
from .matrix_utilities import matrix_transpose
from .representation import (
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
)
__all__ = [
"EarthLocation",
"BaseGeodeticRepresentation",
"WGS84GeodeticRepresentation",
"WGS72GeodeticRepresentation",
"GRS80GeodeticRepresentation",
]
GeodeticLocation = collections.namedtuple("GeodeticLocation", ["lon", "lat", "height"])
ELLIPSOIDS = {}
"""Available ellipsoids (defined in erfam.h, with numbers exposed in erfa)."""
# Note: they get filled by the creation of the geodetic classes.
OMEGA_EARTH = (1.002_737_811_911_354_48 * u.cycle / u.day).to(
1 / u.s, u.dimensionless_angles()
)
"""
Rotational velocity of Earth, following SOFA's pvtob.
In UT1 seconds, this would be 2 pi / (24 * 3600), but we need the value
in SI seconds, so multiply by the ratio of stellar to solar day.
See Explanatory Supplement to the Astronomical Almanac, ed. P. Kenneth
Seidelmann (1992), University Science Books. The constant is the
conventional, exact one (IERS conventions 2003); see
http://hpiers.obspm.fr/eop-pc/index.php?index=constants.
"""
def _check_ellipsoid(ellipsoid=None, default="WGS84"):
if ellipsoid is None:
ellipsoid = default
if ellipsoid not in ELLIPSOIDS:
raise ValueError(f"Ellipsoid {ellipsoid} not among known ones ({ELLIPSOIDS})")
return ellipsoid
def _get_json_result(url, err_str, use_google):
# need to do this here to prevent a series of complicated circular imports
from .name_resolve import NameResolveError
try:
# Retrieve JSON response from Google maps API
resp = urllib.request.urlopen(url, timeout=data.conf.remote_timeout)
resp_data = json.loads(resp.read().decode("utf8"))
except urllib.error.URLError as e:
# This catches a timeout error, see:
# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python
if isinstance(e.reason, socket.timeout):
raise NameResolveError(err_str.format(msg="connection timed out")) from e
else:
raise NameResolveError(err_str.format(msg=e.reason)) from e
except socket.timeout:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
raise NameResolveError(err_str.format(msg="connection timed out"))
if use_google:
results = resp_data.get("results", [])
if resp_data.get("status", None) != "OK":
raise NameResolveError(
err_str.format(msg="unknown failure with Google API")
)
else: # OpenStreetMap returns a list
results = resp_data
if not results:
raise NameResolveError(err_str.format(msg="no results returned"))
return results
class EarthLocationInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("x", "y", "z", "ellipsoid")
def _construct_from_dict(self, map):
# Need to pop ellipsoid off and update post-instantiation. This is
# on the to-fix list in #4261.
ellipsoid = map.pop("ellipsoid")
out = self._parent_cls(**map)
out.ellipsoid = ellipsoid
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new EarthLocation instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : EarthLocation (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Very similar to QuantityInfo.new_like, but the creation of the
# map is different enough that this needs its own routine.
# Get merged info attributes shape, dtype, format, description.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# The above raises an error if the dtypes do not match, but returns
# just the string representation, which is not useful, so remove.
attrs.pop("dtype")
# Make empty EarthLocation using the dtype and unit of the last column.
# Use zeros so we do not get problems for possible conversion to
# geodetic coordinates.
shape = (length,) + attrs.pop("shape")
data = u.Quantity(
np.zeros(shape=shape, dtype=cols[0].dtype), unit=cols[0].unit, copy=False
)
# Get arguments needed to reconstruct class
map = {
key: (data[key] if key in "xyz" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class EarthLocation(u.Quantity):
"""
Location on the Earth.
Initialization is first attempted assuming geocentric (x, y, z) coordinates
are given; if that fails, another attempt is made assuming geodetic
coordinates (longitude, latitude, height above a reference ellipsoid).
When using the geodetic forms, Longitudes are measured increasing to the
east, so west longitudes are negative. Internally, the coordinates are
stored as geocentric.
To ensure a specific type of coordinates is used, use the corresponding
class methods (`from_geocentric` and `from_geodetic`) or initialize the
arguments with names (``x``, ``y``, ``z`` for geocentric; ``lon``, ``lat``,
``height`` for geodetic). See the class methods for details.
Notes
-----
This class fits into the coordinates transformation framework in that it
encodes a position on the `~astropy.coordinates.ITRS` frame. To get a
proper `~astropy.coordinates.ITRS` object from this object, use the ``itrs``
property.
"""
_ellipsoid = "WGS84"
_location_dtype = np.dtype({"names": ["x", "y", "z"], "formats": [np.float64] * 3})
_array_dtype = np.dtype((np.float64, (3,)))
info = EarthLocationInfo()
def __new__(cls, *args, **kwargs):
# TODO: needs copy argument and better dealing with inputs.
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], EarthLocation):
return args[0].copy()
try:
self = cls.from_geocentric(*args, **kwargs)
except (u.UnitsError, TypeError) as exc_geocentric:
try:
self = cls.from_geodetic(*args, **kwargs)
except Exception as exc_geodetic:
raise TypeError(
"Coordinates could not be parsed as either "
"geocentric or geodetic, with respective "
f'exceptions "{exc_geocentric}" and "{exc_geodetic}"'
)
return self
@classmethod
def from_geocentric(cls, x, y, z, unit=None):
"""
Location on Earth, initialized from geocentric coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array-like
Cartesian coordinates. If not quantities, ``unit`` should be given.
unit : unit-like or None
Physical unit of the coordinate values. If ``x``, ``y``, and/or
``z`` are quantities, they will be converted to this unit.
Raises
------
astropy.units.UnitsError
If the units on ``x``, ``y``, and ``z`` do not match or an invalid
unit is given.
ValueError
If the shapes of ``x``, ``y``, and ``z`` do not match.
TypeError
If ``x`` is not a `~astropy.units.Quantity` and no unit is given.
"""
if unit is None:
try:
unit = x.unit
except AttributeError:
raise TypeError(
"Geocentric coordinates should be Quantities "
"unless an explicit unit is given."
) from None
else:
unit = u.Unit(unit)
if unit.physical_type != "length":
raise u.UnitsError("Geocentric coordinates should be in units of length.")
try:
x = u.Quantity(x, unit, copy=False)
y = u.Quantity(y, unit, copy=False)
z = u.Quantity(z, unit, copy=False)
except u.UnitsError:
raise u.UnitsError("Geocentric coordinate units should all be consistent.")
x, y, z = np.broadcast_arrays(x, y, z)
struc = np.empty(x.shape, cls._location_dtype)
struc["x"], struc["y"], struc["z"] = x, y, z
return super().__new__(cls, struc, unit, copy=False)
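# Illustrative call (values are arbitrary, not a real site; ``u`` is the
# `astropy.units` alias imported above):
#
#     >>> loc = EarthLocation.from_geocentric(6378137.0, 0.0, 0.0, unit=u.m)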
@classmethod
def from_geodetic(cls, lon, lat, height=0.0, ellipsoid=None):
"""
Location on Earth, initialized from geodetic coordinates.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth East longitude. Can be anything that initialises an
`~astropy.coordinates.Angle` object (if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
height : `~astropy.units.Quantity` ['length'] or float, optional
Height above reference ellipsoid (if float, in meters; default: 0).
ellipsoid : str, optional
Name of the reference ellipsoid to use (default: 'WGS84').
Available ellipsoids are: 'WGS84', 'GRS80', 'WGS72'.
Raises
------
astropy.units.UnitsError
If the units on ``lon`` and ``lat`` are inconsistent with angular
ones, or that on ``height`` with a length.
ValueError
If ``lon``, ``lat``, and ``height`` do not have the same shape, or
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geocentric coordinates, the ERFA routine
``gd2gc`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=cls._ellipsoid)
# As wrapping fails on readonly input, we do so manually
lon = Angle(lon, u.degree, copy=False).wrap_at(180 * u.degree)
lat = Latitude(lat, u.degree, copy=False)
# don't convert to m by default, so we can use the height unit below.
if not isinstance(height, u.Quantity):
height = u.Quantity(height, u.m, copy=False)
# get geocentric coordinates.
geodetic = ELLIPSOIDS[ellipsoid](lon, lat, height, copy=False)
xyz = geodetic.to_cartesian().get_xyz(xyz_axis=-1) << height.unit
self = xyz.view(cls._location_dtype, cls).reshape(geodetic.shape)
self._ellipsoid = ellipsoid
return self
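# Illustrative call (the coordinates are approximate values for Greenwich,
# given for demonstration only):
#
#     >>> greenwich = EarthLocation.from_geodetic(
#     ...     lon=0.0 * u.deg, lat=51.477 * u.deg, height=46 * u.m
#     ... )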
@classmethod
def of_site(cls, site_name, *, refresh_cache=False):
"""
Return an object of this class for a known observatory/site by name.
This is intended as a quick convenience function to get basic site
information, not a fully-featured exhaustive registry of observatories
and all their properties.
Additional information about the site is stored in the ``.info.meta``
dictionary of sites obtained using this method (see the examples below).
.. note::
This function is meant to access the site registry from the astropy
data server, which is saved in the user's local cache. If you would
like a site to be added there, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If the cache already exists, the function will use it even if the
version in the astropy-data repository has been updated unless the
``refresh_cache=True`` option is used. If there is no cache and the
online version cannot be reached, this function falls back on a
built-in list, which currently only contains the Greenwich Royal
Observatory as an example case.
Parameters
----------
site_name : str
Name of the observatory (case-insensitive).
refresh_cache : bool, optional
If `True`, force replacement of the cached registry with a
newly downloaded version. (Default: `False`)
.. versionadded:: 5.3
Returns
-------
site : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the observatory. The returned class will be the same
as this class.
Examples
--------
>>> from astropy.coordinates import EarthLocation
>>> keck = EarthLocation.of_site('Keck Observatory') # doctest: +REMOTE_DATA
>>> keck.geodetic # doctest: +REMOTE_DATA +FLOAT_CMP
GeodeticLocation(lon=<Longitude -155.47833333 deg>, lat=<Latitude 19.82833333 deg>, height=<Quantity 4160. m>)
>>> keck.info # doctest: +REMOTE_DATA
name = W. M. Keck Observatory
dtype = (float64, float64, float64)
unit = m
class = EarthLocation
n_bad = 0
>>> keck.info.meta # doctest: +REMOTE_DATA
{'source': 'IRAF Observatory Database', 'timezone': 'US/Hawaii'}
See Also
--------
get_site_names : the list of sites that this function can access
"""
registry = cls._get_site_registry(force_download=refresh_cache)
try:
el = registry[site_name]
except UnknownSiteException as e:
raise UnknownSiteException(
e.site, "EarthLocation.get_site_names", close_names=e.close_names
) from e
if cls is el.__class__:
return el
else:
newel = cls.from_geodetic(*el.to_geodetic())
newel.info.name = el.info.name
return newel
@classmethod
def of_address(cls, address, get_height=False, google_api_key=None):
"""
Return an object of this class for a given address by querying either
the OpenStreetMap Nominatim tool [1]_ (default) or the Google geocoding
API [2]_, which requires a specified API key.
This is intended as a quick convenience function to get easy access to
locations. If you need to specify a precise location, you should use the
initializer directly and pass in a longitude, latitude, and elevation.
In the background, this just issues a web query to either of
the APIs noted above. This is not meant to be abused! Both
OpenStreetMap and Google use IP-based query limiting and will ban your
IP if you send more than a few thousand queries per hour [2]_.
.. warning::
If the query returns more than one location (e.g., searching on
``address='springfield'``), this function will use the **first**
returned location.
Parameters
----------
address : str
The address to get the location for. As per the Google maps API,
this can be a fully specified street address (e.g., 123 Main St.,
New York, NY) or a city name (e.g., Danbury, CT), or etc.
get_height : bool, optional
This only works when using the Google API! See the ``google_api_key``
block below. Use the retrieved location to perform a second query to
the Google maps elevation API to retrieve the height of the input
address [3]_.
google_api_key : str, optional
A Google API key with the Geocoding API and (optionally) the
elevation API enabled. See [4]_ for more information.
Returns
-------
location : `~astropy.coordinates.EarthLocation` (or subclass) instance
The location of the input address.
The returned instance will have the type of this class.
References
----------
.. [1] https://nominatim.openstreetmap.org/
.. [2] https://developers.google.com/maps/documentation/geocoding/start
.. [3] https://developers.google.com/maps/documentation/elevation/start
.. [4] https://developers.google.com/maps/documentation/geocoding/get-api-key
"""
use_google = google_api_key is not None
# Fail fast if invalid options are passed:
if not use_google and get_height:
raise ValueError(
"Currently, `get_height` only works when using the Google geocoding"
" API, which requires passing a Google API key with `google_api_key`."
" See:"
" https://developers.google.com/maps/documentation/geocoding/get-api-key"
" for information on obtaining an API key."
)
if use_google: # Google
pars = urllib.parse.urlencode({"address": address, "key": google_api_key})
geo_url = f"https://maps.googleapis.com/maps/api/geocode/json?{pars}"
else: # OpenStreetMap
pars = urllib.parse.urlencode({"q": address, "format": "json"})
geo_url = f"https://nominatim.openstreetmap.org/search?{pars}"
# get longitude and latitude location
err_str = f"Unable to retrieve coordinates for address '{address}'; {{msg}}"
geo_result = _get_json_result(geo_url, err_str=err_str, use_google=use_google)
if use_google:
loc = geo_result[0]["geometry"]["location"]
lat = loc["lat"]
lon = loc["lng"]
else:
loc = geo_result[0]
lat = float(loc["lat"]) # strings are returned by OpenStreetMap
lon = float(loc["lon"])
if get_height:
pars = {"locations": f"{lat:.8f},{lon:.8f}", "key": google_api_key}
pars = urllib.parse.urlencode(pars)
ele_url = f"https://maps.googleapis.com/maps/api/elevation/json?{pars}"
err_str = f"Unable to retrieve elevation for address '{address}'; {{msg}}"
ele_result = _get_json_result(
ele_url, err_str=err_str, use_google=use_google
)
height = ele_result[0]["elevation"] * u.meter
else:
height = 0.0
return cls.from_geodetic(lon=lon * u.deg, lat=lat * u.deg, height=height)
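# Illustrative usage (requires network access; the address is an arbitrary
# example and the result depends on the geocoding service):
#
#     >>> loc = EarthLocation.of_address("1200 E California Blvd, Pasadena, CA")  # doctest: +SKIP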
@classmethod
def get_site_names(cls, *, refresh_cache=False):
"""
Get list of names of observatories for use with
`~astropy.coordinates.EarthLocation.of_site`.
.. note::
This function is meant to access the site registry from the astropy
data server, which is saved in the user's local cache. If you would
like a site to be added there, issue a pull request to the
`astropy-data repository <https://github.com/astropy/astropy-data>`_ .
If the cache already exists, the function will use it even if the
version in the astropy-data repository has been updated unless the
``refresh_cache=True`` option is used. If there is no cache and the
online version cannot be reached, this function falls back on a
built-in list, which currently only contains the Greenwich Royal
Observatory as an example case.
Parameters
----------
refresh_cache : bool, optional
If `True`, force replacement of the cached registry with a
newly downloaded version. (Default: `False`)
.. versionadded:: 5.3
Returns
-------
names : list of str
List of valid observatory names
See Also
--------
of_site : Gets the actual location object for one of the sites names
this returns.
"""
return cls._get_site_registry(force_download=refresh_cache).names
@classmethod
def _get_site_registry(cls, force_download=False, force_builtin=False):
"""
Gets the site registry. The first time this either downloads or loads
from the data file packaged with astropy. Subsequent calls will use the
cached version unless explicitly overridden.
Parameters
----------
force_download : bool or str
If not False, force replacement of the cached registry with a
downloaded version. If a str, that will be used as the URL to
download from (if just True, the default URL will be used).
force_builtin : bool
If True, load from the data file bundled with astropy and set the
cache to that.
Returns
-------
reg : astropy.coordinates.sites.SiteRegistry
"""
# need to do this here at the bottom to avoid circular dependencies
from .sites import get_builtin_sites, get_downloaded_sites
if force_builtin and force_download:
raise ValueError("Cannot have both force_builtin and force_download True")
if force_builtin:
reg = cls._site_registry = get_builtin_sites()
else:
reg = getattr(cls, "_site_registry", None)
if force_download or not reg:
try:
if isinstance(force_download, str):
reg = get_downloaded_sites(force_download)
else:
reg = get_downloaded_sites()
except OSError:
if force_download:
raise
msg = (
"Could not access the main site list. Falling back on the "
"built-in version, which is rather limited. If you want to "
"retry the download, use the option 'refresh_cache=True'."
)
warn(msg, AstropyUserWarning)
reg = get_builtin_sites()
cls._site_registry = reg
return reg
@property
def ellipsoid(self):
"""The default ellipsoid used to convert to geodetic coordinates."""
return self._ellipsoid
@ellipsoid.setter
def ellipsoid(self, ellipsoid):
self._ellipsoid = _check_ellipsoid(ellipsoid)
@property
def geodetic(self):
"""Convert to geodetic coordinates for the default ellipsoid."""
return self.to_geodetic()
def to_geodetic(self, ellipsoid=None):
"""Convert to geodetic coordinates.
Parameters
----------
ellipsoid : str, optional
Reference ellipsoid to use. Default is the one the coordinates
were initialized with. Available are: 'WGS84', 'GRS80', 'WGS72'
Returns
-------
lon, lat, height : `~astropy.units.Quantity`
The tuple is a ``GeodeticLocation`` namedtuple and is comprised of
instances of `~astropy.coordinates.Longitude`,
`~astropy.coordinates.Latitude`, and `~astropy.units.Quantity`.
Raises
------
ValueError
if ``ellipsoid`` is not recognized as among the ones implemented.
Notes
-----
For the conversion to geodetic coordinates, the ERFA routine
``gc2gd`` is used. See https://github.com/liberfa/erfa
"""
ellipsoid = _check_ellipsoid(ellipsoid, default=self.ellipsoid)
xyz = self.view(self._array_dtype, u.Quantity)
llh = CartesianRepresentation(xyz, xyz_axis=-1, copy=False).represent_as(
ELLIPSOIDS[ellipsoid]
)
return GeodeticLocation(
Longitude(llh.lon, u.deg, wrap_angle=180 * u.deg, copy=False),
llh.lat << u.deg,
llh.height << self.unit,
)
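# Illustrative round trip (values are approximate and for demonstration only):
#
#     >>> loc = EarthLocation.from_geodetic(10 * u.deg, 45 * u.deg, 0 * u.m)
#     >>> lon, lat, height = loc.to_geodetic()  # recovers ~10 deg, ~45 deg, ~0 m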
@property
def lon(self):
"""Longitude of the location, for the default ellipsoid."""
return self.geodetic[0]
@property
def lat(self):
"""Latitude of the location, for the default ellipsoid."""
return self.geodetic[1]
@property
def height(self):
"""Height of the location, for the default ellipsoid."""
return self.geodetic[2]
# mostly for symmetry with geodetic and to_geodetic.
@property
def geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return self.to_geocentric()
def to_geocentric(self):
"""Convert to a tuple with X, Y, and Z as quantities."""
return (self.x, self.y, self.z)
def get_itrs(self, obstime=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
itrs = property(
get_itrs,
doc="""An `~astropy.coordinates.ITRS` object
for the location of this object at the
default ``obstime``.""",
)
def get_gcrs(self, obstime):
"""GCRS position with velocity at ``obstime`` as a GCRS coordinate.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
gcrs : `~astropy.coordinates.GCRS` instance
With velocity included.
"""
# do this here to prevent a series of complicated circular imports
from .builtin_frames import GCRS
loc, vel = self.get_gcrs_posvel(obstime)
loc.differentials["s"] = CartesianDifferential.from_cartesian(vel)
return GCRS(loc, obstime=obstime)
def _get_gcrs_posvel(self, obstime, ref_to_itrs, gcrs_to_ref):
"""Calculate GCRS position and velocity given transformation matrices.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This private method is used in intermediate_rotation_transforms,
where some of the matrices are already available for the coordinate
transformation.
The method is faster by an order of magnitude than just adding a zero
velocity to ITRS and transforming to GCRS, because it avoids calculating
the velocity via finite differencing of the results of the transformation
at three separate times.
"""
# The simplest route is to transform to the reference frame where the
# z axis is properly aligned with the Earth's rotation axis (CIRS or
# TETE), then calculate the velocity, and then transform this
# reference position and velocity to GCRS. For speed, though, we
# transform the coordinates to GCRS in one step, and calculate the
# velocities by rotating around the earth's axis transformed to GCRS.
ref_to_gcrs = matrix_transpose(gcrs_to_ref)
itrs_to_gcrs = ref_to_gcrs @ matrix_transpose(ref_to_itrs)
# Earth's rotation vector in the ref frame is rot_vec_ref = (0,0,OMEGA_EARTH),
# so in GCRS it is rot_vec_gcrs[..., 2] @ OMEGA_EARTH.
rot_vec_gcrs = CartesianRepresentation(
ref_to_gcrs[..., 2] * OMEGA_EARTH, xyz_axis=-1, copy=False
)
# Get the position in the GCRS frame.
# Since we just need the cartesian representation of ITRS, avoid get_itrs().
itrs_cart = CartesianRepresentation(self.x, self.y, self.z, copy=False)
pos = itrs_cart.transform(itrs_to_gcrs)
vel = rot_vec_gcrs.cross(pos)
return pos, vel
def get_gcrs_posvel(self, obstime):
"""
Calculate the GCRS position and velocity of this object at the
requested ``obstime``.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the GCRS position/velocity at.
Returns
-------
obsgeoloc : `~astropy.coordinates.CartesianRepresentation`
The GCRS position of the object
obsgeovel : `~astropy.coordinates.CartesianRepresentation`
The GCRS velocity of the object
"""
# Local import to prevent circular imports.
from .builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
)
# Get gcrs_posvel by transforming via CIRS (slightly faster than TETE).
return self._get_gcrs_posvel(
obstime, cirs_to_itrs_mat(obstime), gcrs_to_cirs_mat(obstime)
)
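# Illustrative usage (the obstime is an arbitrary example; requires
# `astropy.time.Time`):
#
#     >>> from astropy.time import Time
#     >>> loc = EarthLocation.from_geodetic(0 * u.deg, 0 * u.deg, 0 * u.m)
#     >>> pos, vel = loc.get_gcrs_posvel(Time("2020-01-01"))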
def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if "earth" in bodies:
bodies.remove("earth")
bodies.append("earth")
_masses = {
"sun": consts.GM_sun,
"jupiter": consts.GM_jup,
"moon": consts.G * 7.34767309e22 * u.kg,
"earth": consts.GM_earth,
}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3 / u.s**2, [M_GM_equivalency]))
except KeyError as err:
raise KeyError(f'body "{body}" does not have a mass.') from err
except u.UnitsError as exc:
exc.args += (
(
'"masses" argument values must be masses or '
"gravitational parameters."
),
)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [
-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)
]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
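# Illustrative usage (the obstime is an arbitrary example; as noted in the
# docstring, the result is a velocity-unit Quantity of order a few m/s):
#
#     >>> from astropy.time import Time
#     >>> loc = EarthLocation.from_geodetic(0 * u.deg, 0 * u.deg, 0 * u.m)
#     >>> z = loc.gravitational_redshift(Time("2020-01-01"))  # doctest: +SKIP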
@property
def x(self):
"""The X component of the geocentric coordinates."""
return self["x"]
@property
def y(self):
"""The Y component of the geocentric coordinates."""
return self["y"]
@property
def z(self):
"""The Z component of the geocentric coordinates."""
return self["z"]
def __getitem__(self, item):
result = super().__getitem__(item)
if result.dtype is self.dtype:
return result.view(self.__class__)
else:
return result.view(u.Quantity)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "_ellipsoid"):
self._ellipsoid = obj._ellipsoid
def __len__(self):
if self.shape == ():
raise IndexError("0-d EarthLocation arrays cannot be indexed")
else:
return super().__len__()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
# Conversion to another unit in both ``to`` and ``to_value`` goes
# via this routine. To make the regular quantity routines work, we
# temporarily turn the structured array into a regular one.
array_view = self.view(self._array_dtype, np.ndarray)
if equivalencies == []:
equivalencies = self._equivalencies
new_array = self.unit.to(unit, array_view, equivalencies=equivalencies)
return new_array.view(self.dtype).reshape(self.shape)
geodetic_base_doc = """{__doc__}
Parameters
----------
lon, lat : angle-like
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle` and either
`~astropy.coordinates.Longitude` or `~astropy.coordinates.Latitude`,
depending on the parameter.
height : `~astropy.units.Quantity` ['length']
The height to the point(s).
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
@format_doc(geodetic_base_doc)
class BaseGeodeticRepresentation(BaseRepresentation):
"""Base geodetic representation."""
attr_classes = {"lon": Longitude, "lat": Latitude, "height": u.Quantity}
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if "_ellipsoid" in cls.__dict__:
ELLIPSOIDS[cls._ellipsoid] = cls
def __init__(self, lon, lat=None, height=None, copy=True):
if height is None and not isinstance(lon, self.__class__):
height = 0 << u.m
super().__init__(lon, lat, height, copy=copy)
if not self.height.unit.is_equivalent(u.m):
raise u.UnitTypeError(
f"{self.__class__.__name__} requires height with units of length."
)
def to_cartesian(self):
"""
Converts geodetic coordinates (for this representation's ellipsoid) to 3D
rectangular (geocentric) cartesian coordinates.
"""
xyz = erfa.gd2gc(
getattr(erfa, self._ellipsoid), self.lon, self.lat, self.height
)
return CartesianRepresentation(xyz, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates (assumed geocentric) to
geodetic coordinates for this representation's ellipsoid.
"""
lon, lat, height = erfa.gc2gd(
getattr(erfa, cls._ellipsoid), cart.get_xyz(xyz_axis=-1)
)
return cls(lon, lat, height, copy=False)
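# Illustrative round trip through one of the concrete ellipsoid classes defined
# below (values are arbitrary):
#
#     >>> geo = WGS84GeodeticRepresentation(30 * u.deg, 45 * u.deg, 100 * u.m)
#     >>> cart = geo.to_cartesian()
#     >>> back = WGS84GeodeticRepresentation.from_cartesian(cart)  # ~same lon/lat/height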
@format_doc(geodetic_base_doc)
class WGS84GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS84 3D geodetic coordinates."""
_ellipsoid = "WGS84"
@format_doc(geodetic_base_doc)
class WGS72GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in WGS72 3D geodetic coordinates."""
_ellipsoid = "WGS72"
@format_doc(geodetic_base_doc)
class GRS80GeodeticRepresentation(BaseGeodeticRepresentation):
"""Representation of points in GRS80 3D geodetic coordinates."""
_ellipsoid = "GRS80"
|
bc2faeaf4fffd5cf33f72e6538c0a4fc0f6797c771cdc8a573b6a99fadd2bbbc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import base64
import contextlib
import errno
import hashlib
import io
import itertools
import os
import pathlib
import platform
import random
import shutil
import stat
import sys
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import warnings
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
import astropy.utils.data
from astropy import units as _u # u is taken
from astropy.config import paths
from astropy.utils.data import (
CacheDamaged,
CacheMissingWarning,
_deltemps,
_get_download_cache_loc,
_tempfilestodel,
cache_contents,
cache_total_size,
check_download_cache,
check_free_space_in_dir,
clear_download_cache,
compute_hash,
conf,
download_file,
download_files_in_parallel,
export_download_cache,
get_cached_urls,
get_file_contents,
get_free_space_in_dir,
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
get_readable_fileobj,
import_download_cache,
import_file_to_cache,
is_url,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
CI = os.environ.get("CI", "false") == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "w") as f:
f.write("some contents\n")
try:
with open(f1):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
@pytest.fixture
def valid_urls(tmp_path):
def _valid_urls(tmp_path):
for i in itertools.count():
c = os.urandom(16).hex()
fn = tmp_path / f"valid_{str(i)}"
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmp_path)
@pytest.fixture
def invalid_urls(tmp_path):
def _invalid_urls(tmp_path):
for i in itertools.count():
fn = tmp_path / f"invalid_{str(i)}"
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmp_path)
@pytest.fixture
def temp_cache(tmp_path):
with paths.set_temp_cache(tmp_path):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmp_path, valid_urls):
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmp_path, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM, "os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM, "os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM, "_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(
astropy.utils.data, "_SafeTemporaryDirectory", no_TemporaryDirectory
)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://" + "a" * 256 + ".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmp_path):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmp_path):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmp_path):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel
):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel(
[u for (u, c, c_bad) in urls], cache=True, sources=sources
)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True), [u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u), [u for (u, c) in urls]))
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls
):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(
temp_cache, tmp_path
):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=tmp_path, delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmp_path, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmp_path / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmp_path, temp_cache):
with TemporaryDirectory(dir=tmp_path) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {"cafile": None, "capath": "/does/not/exist"}
msg = f"Verification of TLS/SSL certificate at {TESTURL_SSL} failed"
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(
TESTURL_SSL, cache=False, ssl_context=ssl_context, allow_insecure=True
)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url + s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all(os.path.isfile(f) for f in fnout), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmp_path, valid_urls, method):
urls = []
# tmp_path is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmp_path):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = {u for (u, c) in urls}
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(
temp_cache, valid_urls, invalid_urls
):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmp_path):
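    # Build local files served as file:// URLs so their contents can be
    # rewritten between the successive parallel downloads.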
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for fn, u, c in td:
c_plus = f"{c} updated"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
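        # the index argument is ignored; it only lets Executor.map fan out the call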
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
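    # get_pkg_data_filename accepts a "hash/<hexdigest>" name, which looks the
    # file up in the download cache by its content hash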
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
"filename", ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if (not HAS_BZ2 and "bz2" in filename) or (not HAS_LZMA and "xz" in filename):
with pytest.raises(ValueError, match=r" format files are not supported"):
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmp_path):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmp_path / request.param
filename = str(datafile)
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write_bytes(contents)
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(
ModuleNotFoundError, match=r"does not provide the [lb]z[2m]a? module\."
):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmp_path):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmp_path / "tmp.dat"
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname="astropy")
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ["remote data cache could not be accessed", "temporary file"]
if n_warns == 4:
partial_warn_msgs.extend(["socket", "socket"])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert (
len(partial_warn_msgs) == 0
), f"Got some unexpected warnings: {partial_warn_msgs}"
assert n_warns in (2, 4), f"Expected 2 or 4 warnings, got {n_warns}"
assert os.path.isfile(fnout)
# clearing the cache should be a no-up that doesn't affect fnout
with pytest.warns(
CacheMissingWarning, match=r".*Not clearing data cache - cache inaccessible.*"
):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
# no warnings should be raise in fileobj because cache is unnecessary
@pytest.mark.parametrize(
"filename",
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
# fmt: off
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0\xd7\x95"
b"\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
# fmt: on
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmp_path, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = tmp_path / "the.zip"
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmp_path, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmp_path, temp_cache, valid_urls):
zip_file_name = tmp_path / "the.zip"
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmp_path):
fn = tmp_path / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding="binary") == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding="binary") != c
def test_export_import_roundtrip_different_location(tmp_path, valid_urls):
original_cache = tmp_path / "original"
original_cache.mkdir()
zip_file_name = tmp_path / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = {u for (u, c) in urls}
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmp_path / "new"
new_cache.mkdir()
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for u, c in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
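    # each cache entry stores the downloaded contents plus a small "url" file
    # recording the URL, so both contribute to the total size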
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for u, c, h in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize("desired_size", [1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmp_path, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(tmp_path, desired_size)
def test_get_free_space_file_directory(tmp_path):
fn = tmp_path / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(fn)
free_space = get_free_space_in_dir(tmp_path)
assert free_space > 0 and not hasattr(free_space, "unit")
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(tmp_path, unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(tmp_path, unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmp_path):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmp_path))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmp_path):
fn = str(tmp_path / "file")
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "w") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "w") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "w") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
f2 = download_file(u, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmp_path, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
in their directory tree, and because the cache directory is actual several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = tmp_path / "file"
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = tmp_path / "astropy"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download" / "url"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn) as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmp_path, valid_urls):
u, c = next(valid_urls)
d1 = tmp_path / "1"
d2 = tmp_path / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmp_path, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmp_path))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = list(tmp_path.iterdir())
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert (
f.read().rstrip()
== "This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
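    # report_length and real_length are assigned below, just before each call;
    # the nested MockURL methods read them as closure variables at call time,
    # so a single mock covers every combination.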
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type("MockOpener", (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmp_path):
try:
with readonly_dir(tmp_path):
assert is_dir_readonly(tmp_path)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmp_path):
fn = tmp_path / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmp_path):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
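    # Files downloaded under a different pkgname live in their own cache and
    # must never appear in the default astropy cache (and vice versa).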
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW + 1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW + 1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
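    # Export the default cache to a zip stream and import it under another
    # pkgname; the copies are distinct files with identical contents.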
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)], pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp("allow_internet", False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), "url"))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f) + "/")
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif(
CI and os.environ.get("IS_CRON", "false") == "false",
reason="Flaky/too much external traffic for regular CI",
)
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
"""Test that download automatically enables TLS/SSL when required"""
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.daily"
download_file(url)
@pytest.mark.parametrize("base", ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
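    # A URL with and without a trailing slash should map to the same cache entry.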
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file("file://", cache=True, sources=[u])
assert not is_url_in_cache("file:///")
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = [
"Name or service not known",
"nodename nor servname provided, or not known",
"getaddrinfo failed",
"Temporary failure in name resolution",
"No address associated with hostname",
]
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
("s", "ans"),
[
("http://googlecom", True),
("https://google.com", True),
("ftp://google.com", True),
("sftp://google.com", True),
("ssh://google.com", True),
("file:///c:/path/to/the%20file.txt", True),
("google.com", False),
("C:\\\\path\\\\file.docx", False),
("data://file", False),
],
)
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
|
b24da1a951a6f34db5ac5b816cdbf8bf484e49bf3bdf3c573e8eba4ce72f9617 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy.units.tests.test_quantity_non_ufuncs import get_wrapped_functions
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24, NUMPY_LT_1_25
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .test_masked import MaskedArraySetup, assert_masked_equal
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
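    # The expected result is built by applying the function separately to the
    # data and to the mask.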
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(
func(self.a, *args, **kwargs), mask=func(self.mask_a, *args, **kwargs)
)
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(
func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs),
)
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
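    # The output of these functions carries no mask, so it can be compared
    # directly with the result for the unmasked data.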
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
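    # These functions transform the data but pass the mask through unchanged.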
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
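        # arg-functions should act as if masked entries were replaced by fill_value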
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.0)
@pytest.mark.filterwarnings("ignore:Calling nonzero on 0d arrays is deprecated")
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.0)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.0)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize("axes", [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in axes if isinstance(axes, tuple) else (axes,):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match="not.*correct shape"):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1 + 2j, 3 + 4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize("value", [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize("value", [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
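        # entries of ma_list may be plain arrays; wrapping them in Masked()
        # gives uniform access to .unmasked and .mask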
ma_list = kwargs.pop("ma_list", [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
self.check(np.concatenate, dtype="f4")
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0.0, Masked(1.0, True)], [Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.0], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50.0, 25.0], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50.0, 25.0])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.0).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_max(self):
self.check(np.max, method="max")
def test_min(self):
self.check(np.min, method="min")
def test_amax(self):
self.check(np.amax, method="max")
def test_amin(self):
self.check(np.amin, method="min")
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
def test_sometrue(self):
self.check(np.sometrue, method="any")
def test_alltrue(self):
self.check(np.alltrue, method="all")
def test_prod(self):
self.check(np.prod)
def test_product(self):
self.check(np.product, method="prod")
def test_cumprod(self):
self.check(np.cumprod)
def test_cumproduct(self):
self.check(np.cumproduct, method="cumprod")
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round, method="round")
def test_round_(self):
if NUMPY_LT_1_25:
self.check(np.round_, method="round")
else:
with pytest.warns(
DeprecationWarning, match="`round_` is deprecated as of NumPy 1.25.0"
):
self.check(np.round_, method="round")
def test_around(self):
self.check(np.around, method="round")
def test_clip(self):
self.check(np.clip, 2.0, 4.0)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.0)
expected = np.where(mask, self.a, 1000.0)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.0)
expected2 = np.where(mask, self.a, 1000.0)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match="either both or neither"):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize("default", [-1.0, np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma + 1], default=default)
expected = np.select(
[a < 1.5, a > 3.5],
[a, a + 1],
default=-1 if default is not np.ma.masked else 0,
)
expected_mask = np.select(
[a < 1.5, a > 3.5],
[mask_a, mask_a],
default=getattr(default, "mask", False),
)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.0], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0.0, 1.0], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3.0, 4.0]] * 2)
self.mask_a = np.array([[False] * 5, [True] * 4 + [False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == "b"
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1.0 + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1.0 + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1.0 + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1.0 + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)[
self.mask_a | self.mask_b
].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(), self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b).reshape(
result.shape
)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.0).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
# Deliberately mask the lower triangle, so that some rows and columns are fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
# Check function by comparing to nan-equivalent, with masked
# values set to NaN.
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, "nan" + function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
# Also check that we can give an output MaskedArray.
if NUMPY_LT_1_25 and kwargs.get("keepdims", False):
# numpy bug gh-22714 prevents using out with keepdims=True.
# This is fixed in numpy 1.25.
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
# But check that a regular array cannot be used since it has no mask.
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_median(self, axis, keepdims):
self.check(np.median, axis=axis, keepdims=keepdims)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_quantile(self, axis, keepdims):
self.check(np.quantile, q=[0.25, 0.5], axis=axis, keepdims=keepdims)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match="must be in the range"):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.0)
mask = np.concatenate(
[np.ones((2, 1), bool), self.mask_a, np.zeros((2, 1), bool)], axis=-1
)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [
(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack(
[
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2],
],
axis=-1,
),
]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1.0, 7.0).reshape(2, 3)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10.0, 3.0])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(
self.mask_a | self.mask_b, expected.shape
).copy()
# TODO: make an implementation that also ensures the start point mask is
# determined just by the start point (as for geomspace in numpy 1.20)?
expected_mask[-1] = self.mask_b
if function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.0)
fp = np.array([1.0, 5.0, 6.0, 19.0, 20.0])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.0])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.0])
expected = np.piecewise(self.a, condlist, [-1, 1.0])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(
self.ma,
condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.0), mask=~x.mask)],
)
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(
self.mask_a, condlist2, [True, False, lambda x: ~x]
)
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match="with 2 condition"):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True] + [False] * 5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(
np.array([1 + 2j, 0 + 4j, 3 + 0j, -1 - 1j]),
mask=[True, False, False, False],
)
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == "[— 1 2]"
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=", ")
assert out1 == "[—, 1, 2]"
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=", ", formatter={"all": hex})
assert out2 == "[———, 0x1, 0x2]"
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.ma, None, None, None, ", ", "", np._NoValue, {"int": hex}
)
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=", ", formatter={"float": hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == "MaskedNDArray([—, 1, 2])"
ma2 = self.ma.astype("f4")
out2 = np.array_repr(ma2)
assert out2 == "MaskedNDArray([——, 1., 2.], dtype=float32)"
def test_array_str(self):
out = np.array_str(self.ma)
assert out == "[— 1 2]"
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype="u1")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False] * 15 + [True, True] + [False] * 7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize("axis", [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1.0, 4.0)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10.0, 3.0, 4.0])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(["2020-12-31", "2021-01-01", "2021-01-02"], dtype="M")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([["2021-01-07"], ["2021-01-31"]], dtype="M")
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestNaNFunctions:
def setup_class(self):
self.a = np.array(
[
[np.nan, np.nan, 3.0],
[4.0, 5.0, 6.0],
]
)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True, **kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions | IGNORED_FUNCTIONS | UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (
MASKED_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(APPLY_TO_BOTH_FUNCTIONS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
|
504154abb73956d6c6437a9b64d984b26ec80a097d3486a03796d90b382bf671 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
from functools import partial
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import BaseCoordinateFrame, SkyCoord
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_PIL
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .coordinates_map import CoordinatesMap
from .frame import RectangularFrame, RectangularFrame1D
from .transforms import CoordinateTransform
from .utils import get_coord_meta, transform_contour_set_inplace
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ["WCSAxes", "WCSAxesSubplot"]
VISUAL_PROPERTIES = ["facecolor", "edgecolor", "linewidth", "alpha", "linestyle"]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
and gridlines in the standard way.
"""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
*args
``*args`` can be a single ``(left, bottom, width, height)``
rectangle or a single `matplotlib.transforms.Bbox`. This specifies
the rectangle (in figure coordinates) where the Axes is positioned.
``*args`` can also consist of three numbers or a single three-digit
number; in the latter case, the digits are considered as
independent numbers. The numbers are interpreted as ``(nrows,
ncols, index)``: ``(nrows, ncols)`` specifies the size of an array
of subplots, and ``index`` is the 1-based index of the subplot
being created. Finally, ``*args`` can also directly be a
`matplotlib.gridspec.SubplotSpec` instance.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
Attributes
----------
coords : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
Container for coordinate information.
"""
def __init__(
self,
fig,
*args,
wcs=None,
transform=None,
coord_meta=None,
transData=None,
slices=None,
frame_class=None,
**kwargs,
):
""" """
super().__init__(fig, *args, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif wcs is not None and (
wcs.pixel_n_dim == 1 or (slices is not None and "y" not in slices)
):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(
wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta
)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect("key_press_event", self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(
coord.format_coord(world[coord.coord_index], format="ascii")
)
coord_string = " ".join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == "w":
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop("origin", "lower")
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = "lower"
elif origin == "upper":
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
if HAS_PIL:
from PIL.Image import Image
if minversion("PIL", "9.1"):
from PIL.Image import Transpose
FLIP_TOP_BOTTOM = Transpose.FLIP_TOP_BOTTOM
else:
from PIL.Image import FLIP_TOP_BOTTOM
if isinstance(X, Image) or hasattr(X, "getpixel"):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
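# Hedged usage sketch (``other_data`` and ``other_wcs`` are hypothetical):
# contours defined on a different WCS are usually overlaid by passing a
# transform obtained from ``get_transform``, e.g.
#
#     ax.contour(other_data, transform=ax.get_transform(other_wcs),
#                levels=[1.0, 2.0, 4.0], colors="white")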
transform = kwargs.pop("transform", None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop("transform", None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def _transform_plot_args(self, *args, **kwargs):
"""
Apply transformations to arguments to ``plot_coord`` and
``scatter_coord``.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == "longitude":
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == "latitude":
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError(
"Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude."
)
if "transform" in kwargs.keys():
raise TypeError(
"The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame."
)
transform = self.get_transform(native_frame)
kwargs.update({"transform": transform})
args = tuple(plot_data) + args[1:]
return args, kwargs
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().plot(*args, **kwargs)
def scatter_coord(self, *args, **kwargs):
"""
Scatter `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.scatter_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.scatter`. All other arguments are the same as
`matplotlib.axes.Axes.scatter`. If not specified a ``transform``
keyword argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to scatter on the axes. This is converted to
the first two arguments to `matplotlib.axes.Axes.scatter`.
See Also
--------
matplotlib.axes.Axes.scatter : This method is called from this function with all arguments passed to it.
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().scatter(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, "coords"):
previous_frame = {
"path": self.coords.frame._path,
"color": self.coords.frame.get_color(),
"linewidth": self.coords.frame.get_linewidth(),
}
else:
previous_frame = {"path": None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(
self.wcs, self.frame_class, slices=slices
)
self.coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame["path"],
)
self._transform_pixel2world = transform
if previous_frame["path"] is not None:
self.coords.frame.set_color(previous_frame["color"])
self.coords.frame.set_linewidth(previous_frame["linewidth"])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(
coord_meta.get("default_axislabel_position", ["b", "l"])
):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticklabel_position", ["b", "l"])
):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticks_position", ["bltr", "bltr"])
):
self.coords[ind].set_ticks_position(pos)
if rcParams["axes.grid"]:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here we need to find out the range of all coordinates, and update the range
# for each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
# Draw grids
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
# Draw tick labels
for coord in coords:
coord._draw_ticks(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
)
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
# Draw axis labels
for coord in coords:
coord._draw_axislabels(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks,
)
self.coords.frame.draw(renderer)
def draw(self, renderer, **kwargs):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
# We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, **kwargs)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop("label", None)
if xlabel is None:
raise TypeError(
"set_xlabel() missing 1 required positional argument: 'xlabel'"
)
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label."""
if ylabel is None:
ylabel = kwargs.pop("label", None)
if ylabel is None:
raise TypeError(
"set_ylabel() missing 1 required positional argument: 'ylabel'"
)
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
"""Get coordinates overlay on given frame.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame`
Frame to get overlay for. If a string must correspond to
one of the coordinate frames registered in the astropy
frame transform graph.
coord_meta : dict
Metadata for the coordinates overlay.
Returns
-------
overlay : `~astropy.visualization.wcsaxes.CoordinatesMap`
Coordinates overlay.
"""
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position("t")
coords[1].set_axislabel_position("r")
coords[0].set_ticklabel_position("t")
coords[1].set_ticklabel_position("r")
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame.
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (
self._transform_pixel2world
+ CoordinateTransform(
self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in,
)
+ transform_world2pixel
)
elif isinstance(frame, str) and frame == "pixel":
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == "world":
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(
self._transform_pixel2world.frame_out, frame
)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
# Do a draw to populate the self._bboxes list
self.draw_wcsaxes(renderer)
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis="both", *, which="major", **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
"""
if not hasattr(self, "coords"):
return
if which != "major":
raise NotImplementedError(
"Plotting the grid for the minor ticks is not supported."
)
if axis == "both":
self.coords.grid(draw_grid=b, **kwargs)
elif axis == "x":
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == "y":
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError("axis should be one of x/y/both")
def tick_params(self, axis="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, "coords"):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == "both":
for pos in ("bottom", "left", "top", "right"):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if "label" + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ("x", "y") and self.frame_class is RectangularFrame:
spine = "b" if axis == "x" else "l"
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
A subplot class for WCSAxes.
"""
pass
|
1fc8c506a90a15f25057f5b1cbfa2c9b119385fcb6f82659c8569b738bf79ecc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import textwrap
from collections import OrderedDict
from itertools import chain, permutations
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.nddata import NDDataArray
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy.utils import NumpyRNGContext
from astropy.utils.masked import Masked
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, HighLevelWCSWrapper, SlicedLowLevelWCS
from .test_nduncertainty import FakeUncertainty
class FakeNumpyArray:
"""
Class that has a few of the attributes of a numpy array.
These attributes are checked for by NDData.
"""
def __init__(self):
super().__init__()
def shape(self):
pass
def __getitem__(self):
pass
def __array__(self):
pass
@property
def dtype(self):
return "fake"
class MinimalUncertainty:
"""
Define the minimum attributes acceptable as an uncertainty object.
"""
def __init__(self, value):
self._uncertainty = value
@property
def uncertainty_type(self):
return "totally and completely fake"
class BadNDDataSubclass(NDData):
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
psf=None,
):
self._data = data
self._uncertainty = uncertainty
self._mask = mask
self._wcs = wcs
self._psf = psf
self._unit = unit
self._meta = meta
# Setter tests
def test_uncertainty_setter():
nd = NDData([1, 2, 3])
good_uncertainty = MinimalUncertainty(5)
nd.uncertainty = good_uncertainty
assert nd.uncertainty is good_uncertainty
# Check the fake uncertainty (minimal does not work since it has no
# parent_nddata attribute from NDUncertainty)
nd.uncertainty = FakeUncertainty(5)
assert nd.uncertainty.parent_nddata is nd
# Check that it works if the uncertainty was set during init
nd = NDData(nd)
assert isinstance(nd.uncertainty, FakeUncertainty)
nd.uncertainty = 10
assert not isinstance(nd.uncertainty, FakeUncertainty)
assert nd.uncertainty.array == 10
def test_mask_setter():
# Since it just changes the _mask attribute everything should work
nd = NDData([1, 2, 3])
nd.mask = True
assert nd.mask
nd.mask = False
assert not nd.mask
# Check that it replaces a mask from init
nd = NDData(nd, mask=True)
assert nd.mask
nd.mask = False
assert not nd.mask
# Init tests
def test_nddata_empty():
with pytest.raises(TypeError):
NDData() # empty initializer should fail
def test_nddata_init_data_nonarray():
inp = [1, 2, 3]
nd = NDData(inp)
assert (np.array(inp) == nd.data).all()
def test_nddata_init_data_ndarray():
# random floats
with NumpyRNGContext(123):
nd = NDData(np.random.random((10, 10)))
assert nd.data.shape == (10, 10)
assert nd.data.size == 100
assert nd.data.dtype == np.dtype(float)
# specific integers
nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.data.size == 6
assert nd.data.dtype == np.dtype(int)
# Tests to ensure that creating a new NDData object copies by *reference*.
a = np.ones((10, 10))
nd_ref = NDData(a)
a[0, 0] = 0
assert nd_ref.data[0, 0] == 0
# Except we choose copy=True
a = np.ones((10, 10))
nd_ref = NDData(a, copy=True)
a[0, 0] = 0
assert nd_ref.data[0, 0] != 0
def test_nddata_init_data_maskedarray():
with NumpyRNGContext(456):
NDData(np.random.random((10, 10)), mask=np.random.random((10, 10)) > 0.5)
# Another test (just copied here)
with NumpyRNGContext(12345):
a = np.random.randn(100)
marr = np.ma.masked_where(a > 0, a)
nd = NDData(marr)
# check that masks and data match
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# check that they are both by reference
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 123456789
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# or not if we choose copy=True
nd = NDData(marr, copy=True)
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 0
assert nd.mask[10] != marr.mask[10]
assert nd.data[11] != marr.data[11]
@pytest.mark.parametrize("data", [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
# Test an array and a scalar because a scalar Quantity does not always
# behave the same way as an array.
quantity = data * u.adu
ndd = NDData(quantity)
assert ndd.unit == quantity.unit
assert_array_equal(ndd.data, np.array(quantity))
if ndd.data.size > 1:
# check that if it is an array it is not copied
quantity.value[1] = 100
assert ndd.data[1] == quantity.value[1]
# or is copied if we choose copy=True
ndd = NDData(quantity, copy=True)
quantity.value[1] = 5
assert ndd.data[1] != quantity.value[1]
# provide a quantity and override the unit
ndd_unit = NDData(data * u.erg, unit=u.J)
assert ndd_unit.unit == u.J
np.testing.assert_allclose((ndd_unit.data * ndd_unit.unit).to_value(u.erg), data)
def test_nddata_init_data_masked_quantity():
a = np.array([2, 3])
q = a * u.m
m = False
mq = Masked(q, mask=m)
nd = NDData(mq)
assert_array_equal(nd.data, a)
    # This test failed before the change in the NDData init because the masked
    # array's data (which in fact was a quantity) was saved directly.
assert nd.unit == u.m
assert not isinstance(nd.data, u.Quantity)
np.testing.assert_array_equal(nd.mask, np.array(m))
def test_nddata_init_data_nddata():
nd1 = NDData(np.array([1]))
nd2 = NDData(nd1)
assert nd2.wcs == nd1.wcs
assert nd2.uncertainty == nd1.uncertainty
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# Check that it is copied by reference
nd1 = NDData(np.ones((5, 5)))
nd2 = NDData(nd1)
assert nd1.data is nd2.data
# Check that it is really copied if copy=True
nd2 = NDData(nd1, copy=True)
nd1.data[2, 3] = 10
assert nd1.data[2, 3] != nd2.data[2, 3]
# Now let's see what happens if we have all explicitly set
nd1 = NDData(
np.array([1]),
mask=False,
uncertainty=StdDevUncertainty(10),
unit=u.s,
meta={"dest": "mordor"},
wcs=WCS(naxis=1),
psf=np.array([10]),
)
nd2 = NDData(nd1)
assert nd2.data is nd1.data
assert nd2.wcs is nd1.wcs
assert nd2.uncertainty.array == nd1.uncertainty.array
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# now what happens if we overwrite them all too
nd3 = NDData(
nd1,
mask=True,
uncertainty=StdDevUncertainty(200),
unit=u.km,
meta={"observer": "ME"},
wcs=WCS(naxis=1),
psf=np.array([20]),
)
assert nd3.data is nd1.data
assert nd3.wcs is not nd1.wcs
assert nd3.uncertainty.array != nd1.uncertainty.array
assert nd3.mask != nd1.mask
assert nd3.unit != nd1.unit
assert nd3.meta != nd1.meta
assert nd3.psf != nd1.psf
def test_nddata_init_data_nddata_subclass():
uncert = StdDevUncertainty(3)
# There might be some incompatible subclasses of NDData around.
bnd = BadNDDataSubclass(False, True, 3, 2, "gollum", 100, 12)
    # Before changing the NDData init this would not have raised an error but
    # would have led to a compromised nddata instance.
with pytest.raises(TypeError):
NDData(bnd)
# but if it has no actual incompatible attributes it passes
bnd_good = BadNDDataSubclass(
np.array([1, 2]),
uncert,
3,
HighLevelWCSWrapper(WCS(naxis=1)),
{"enemy": "black knight"},
u.km,
)
nd = NDData(bnd_good)
assert nd.unit == bnd_good.unit
assert nd.meta == bnd_good.meta
assert nd.uncertainty == bnd_good.uncertainty
assert nd.mask == bnd_good.mask
assert nd.wcs is bnd_good.wcs
assert nd.data is bnd_good.data
def test_nddata_init_data_fail():
# First one is sliceable but has no shape, so should fail.
with pytest.raises(TypeError):
NDData({"a": "dict"})
# This has a shape but is not sliceable
class Shape:
def __init__(self):
self.shape = 5
def __repr__(self):
return "7"
with pytest.raises(TypeError):
NDData(Shape())
def test_nddata_init_data_fakes():
ndd1 = NDData(FakeNumpyArray())
# First make sure that NDData isn't converting its data to a numpy array.
assert isinstance(ndd1.data, FakeNumpyArray)
# Make a new NDData initialized from an NDData
ndd2 = NDData(ndd1)
# Check that the data wasn't converted to numpy
assert isinstance(ndd2.data, FakeNumpyArray)
# Specific parameters
def test_param_uncertainty():
u = StdDevUncertainty(array=np.ones((5, 5)))
d = NDData(np.ones((5, 5)), uncertainty=u)
# Test that the parent_nddata is set.
assert d.uncertainty.parent_nddata is d
# Test conflicting uncertainties (other NDData)
u2 = StdDevUncertainty(array=np.ones((5, 5)) * 2)
d2 = NDData(d, uncertainty=u2)
assert d2.uncertainty is u2
assert d2.uncertainty.parent_nddata is d2
def test_param_wcs():
# Since everything is allowed we only need to test something
nd = NDData([1], wcs=WCS(naxis=1))
assert nd.wcs is not None
# Test conflicting wcs (other NDData)
nd2 = NDData(nd, wcs=WCS(naxis=1))
assert nd2.wcs is not None and nd2.wcs is not nd.wcs
def test_param_meta():
    # only dict-like meta is allowed; anything else should raise
with pytest.raises(TypeError):
NDData([1], meta=3)
nd = NDData([1, 2, 3], meta={})
assert len(nd.meta) == 0
nd = NDData([1, 2, 3])
assert isinstance(nd.meta, OrderedDict)
assert len(nd.meta) == 0
# Test conflicting meta (other NDData)
nd2 = NDData(nd, meta={"image": "sun"})
assert len(nd2.meta) == 1
nd3 = NDData(nd2, meta={"image": "moon"})
assert len(nd3.meta) == 1
assert nd3.meta["image"] == "moon"
def test_param_mask():
# Since everything is allowed we only need to test something
nd = NDData([1], mask=False)
assert not nd.mask
# Test conflicting mask (other NDData)
nd2 = NDData(nd, mask=True)
assert nd2.mask
# (masked array)
nd3 = NDData(np.ma.array([1], mask=False), mask=True)
assert nd3.mask
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd4 = NDData(mq, mask=True)
assert nd4.mask
def test_param_unit():
with pytest.raises(ValueError):
NDData(np.ones((5, 5)), unit="NotAValidUnit")
NDData([1, 2, 3], unit="meter")
# Test conflicting units (quantity as data)
q = np.array([1, 2, 3]) * u.m
nd = NDData(q, unit="cm")
assert nd.unit != q.unit
assert nd.unit == u.cm
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd2 = NDData(mq, unit=u.pc)
assert nd2.unit == u.pc
# (another NDData as data)
nd3 = NDData(nd, unit="km")
assert nd3.unit == u.km
# (MaskedQuantity given to NDData)
mq_astropy = Masked.from_unmasked(q, False)
nd4 = NDData(mq_astropy, unit="km")
assert nd4.unit == u.km
def test_pickle_nddata_with_uncertainty():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
ndd_dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(ndd_dumped)
assert type(ndd_restored.uncertainty) is StdDevUncertainty
assert ndd_restored.uncertainty.parent_nddata is ndd_restored
assert ndd_restored.uncertainty.unit == u.m
def test_pickle_uncertainty_only():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
uncertainty_dumped = pickle.dumps(ndd.uncertainty)
uncertainty_restored = pickle.loads(uncertainty_dumped)
np.testing.assert_array_equal(ndd.uncertainty.array, uncertainty_restored.array)
assert ndd.uncertainty.unit == uncertainty_restored.unit
    # Even though it has a parent, nothing references that parent after
    # unpickling, so the weakref "dies" as soon as unpickling finishes.
assert uncertainty_restored.parent_nddata is None
def test_pickle_nddata_without_uncertainty():
ndd = NDData(np.ones(3), unit=u.m)
dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(dumped)
np.testing.assert_array_equal(ndd.data, ndd_restored.data)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaNDData(MetaBaseTest):
test_class = NDData
args = np.array([[1.0]])
# Representation tests
def test_nddata_str():
arr1d = NDData(np.array([1, 2, 3]))
assert str(arr1d) == "[1 2 3]"
arr2d = NDData(np.array([[1, 2], [3, 4]]))
assert str(arr2d) == textwrap.dedent(
"""
[[1 2]
[3 4]]"""[
1:
]
)
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert str(arr3d) == textwrap.dedent(
"""
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]"""[
1:
]
)
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
assert str(arr) == "[1 2 3] km"
# what if it had these units?
arr = NDData(np.array([1, 2, 3]), unit="erg cm^-2 s^-1 A^-1")
assert str(arr) == "[1 2 3] erg / (A cm2 s)"
def test_nddata_repr():
# The big test is eval(repr()) should be equal to the original!
arr1d = NDData(np.array([1, 2, 3]))
s = repr(arr1d)
assert s == "NDData([1, 2, 3])"
got = eval(s)
assert np.all(got.data == arr1d.data)
assert got.unit == arr1d.unit
arr2d = NDData(np.array([[1, 2], [3, 4]]))
s = repr(arr2d)
assert s == textwrap.dedent(
"""
NDData([[1, 2],
[3, 4]])"""[
1:
]
)
got = eval(s)
assert np.all(got.data == arr2d.data)
assert got.unit == arr2d.unit
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
s = repr(arr3d)
assert s == textwrap.dedent(
"""
NDData([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])"""[
1:
]
)
got = eval(s)
assert np.all(got.data == arr3d.data)
assert got.unit == arr3d.unit
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
s = repr(arr)
assert s == "NDData([1, 2, 3], unit='km')"
got = eval(s)
assert np.all(got.data == arr.data)
assert got.unit == arr.unit
# Not supported features
def test_slicing_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd[0]
def test_arithmetic_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd + ndd
def test_nddata_wcs_setter_error_cases():
ndd = NDData(np.ones((5, 5)))
# Setting with a non-WCS should raise an error
with pytest.raises(TypeError):
ndd.wcs = "I am not a WCS"
naxis = 2
# This should succeed since the WCS is currently None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
with pytest.raises(ValueError):
# This should fail since the WCS is not None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
def test_nddata_wcs_setter_with_low_level_wcs():
ndd = NDData(np.ones((5, 5)))
wcs = WCS()
# If the wcs property is set with a low level WCS it should get
# wrapped to high level.
low_level = SlicedLowLevelWCS(wcs, 5)
assert not isinstance(low_level, BaseHighLevelWCS)
ndd.wcs = low_level
assert isinstance(ndd.wcs, BaseHighLevelWCS)
def test_nddata_init_with_low_level_wcs():
wcs = WCS()
low_level = SlicedLowLevelWCS(wcs, 5)
ndd = NDData(np.ones((5, 5)), wcs=low_level)
assert isinstance(ndd.wcs, BaseHighLevelWCS)
class NDDataCustomWCS(NDData):
@property
def wcs(self):
return WCS()
def test_overriden_wcs():
# Check that a sub-class that overrides `.wcs` without providing a setter
# works
NDDataCustomWCS(np.ones((5, 5)))
# set up parameters for test_collapse:
np.random.seed(42)
collapse_units = [None, u.Jy]
collapse_propagate = [True, False]
collapse_data_shapes = [
# 3D example:
(4, 3, 2),
# 5D example
(6, 5, 4, 3, 2),
]
collapse_ignore_masked = [True, False]
collapse_masks = list(
chain.from_iterable(
[
# try the operations without a mask (all False):
np.zeros(collapse_data_shape).astype(bool)
]
+ [
# assemble a bunch of random masks:
np.random.randint(0, 2, size=collapse_data_shape).astype(bool)
for _ in range(10)
]
for collapse_data_shape in collapse_data_shapes
)
)
# the following provides pytest.mark.parametrize with every
# permutation of (1) the units, (2) propagating/not propagating
# uncertainties, (3) the data shapes of different ndim, and (4) whether
# the operation ignores masked values.
permute = (
len(collapse_masks)
* len(collapse_propagate)
* len(collapse_units)
* len(collapse_ignore_masked)
)
collapse_units = permute // len(collapse_units) * collapse_units
collapse_propagate = permute // len(collapse_propagate) * collapse_propagate
collapse_masks = permute // len(collapse_masks) * collapse_masks
collapse_ignore_masked = permute // len(collapse_ignore_masked) * collapse_ignore_masked
@pytest.mark.parametrize(
"mask, unit, propagate_uncertainties, operation_ignores_mask",
zip(collapse_masks, collapse_units, collapse_propagate, collapse_ignore_masked),
)
def test_collapse(mask, unit, propagate_uncertainties, operation_ignores_mask):
# unique set of combinations of each of the N-1 axes for an N-D cube:
axes_permutations = {tuple(axes[:2]) for axes in permutations(range(mask.ndim))}
# each of the single axis slices:
axes_permutations.update({axis for axis in range(mask.ndim)})
axes_permutations.update({None})
cube = np.arange(np.prod(mask.shape)).reshape(mask.shape)
numpy_cube = np.ma.masked_array(cube, mask=mask)
ma_cube = Masked(cube, mask=mask)
ndarr = NDDataArray(cube, uncertainty=StdDevUncertainty(cube), unit=unit, mask=mask)
# By construction, the minimum value along each axis is always the zeroth index and
# the maximum is always the last along that axis. We verify that here, so we can
# test that the correct uncertainties are extracted during the
# `NDDataArray.min` and `NDDataArray.max` methods later:
for axis in range(cube.ndim):
assert np.all(np.equal(cube.argmin(axis=axis), 0))
assert np.all(np.equal(cube.argmax(axis=axis), cube.shape[axis] - 1))
# confirm that supported nddata methods agree with corresponding numpy methods
# for the masked data array:
sum_methods = ["sum", "mean"]
ext_methods = ["min", "max"]
all_methods = sum_methods + ext_methods
# for all supported methods, ensure the masking is propagated:
for method in all_methods:
for axes in axes_permutations:
astropy_method = getattr(ma_cube, method)(axis=axes)
numpy_method = getattr(numpy_cube, method)(axis=axes)
nddata_method = getattr(ndarr, method)(
axis=axes,
propagate_uncertainties=propagate_uncertainties,
operation_ignores_mask=operation_ignores_mask,
)
astropy_unmasked = astropy_method.base[~astropy_method.mask]
nddata_unmasked = nddata_method.data[~nddata_method.mask]
# check if the units are passed through correctly:
assert unit == nddata_method.unit
# check if the numpy and astropy.utils.masked results agree when
# the result is not fully masked:
if len(astropy_unmasked) > 0:
if not operation_ignores_mask:
# compare with astropy
assert np.all(np.equal(astropy_unmasked, nddata_unmasked))
assert np.all(np.equal(astropy_method.mask, nddata_method.mask))
else:
# compare with numpy
assert np.ma.all(
np.ma.equal(numpy_method, np.asanyarray(nddata_method))
)
# For extremum methods, ensure the uncertainty returned corresponds to the
# min/max data value. We've created the uncertainties to have the same value
# as the data array, so we can just check for equality:
if method in ext_methods and propagate_uncertainties:
assert np.ma.all(np.ma.equal(astropy_method, nddata_method))
|
1865401c13660bbaa8bea15f9edd10ce1c2e56770ef67fd093bf6c8ec90fa2bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import re
import unicodedata
import warnings
from fractions import Fraction
from astropy.utils import classproperty, deprecated, parsing
from astropy.utils.misc import did_you_mean
from . import core
from .base import Base
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
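    Examples
    --------
    An illustrative sketch (added for clarity; the exact reprs may vary
    slightly between astropy versions, hence the skipped doctests)::
        >>> from astropy import units as u
        >>> u.Unit("km / s", format="generic")  # doctest: +SKIP
        Unit("km / s")
        >>> u.Unit("erg cm^-2 s^-1 A^-1", format="generic")  # doctest: +SKIP
        Unit("erg / (A cm2 s)")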
"""
_tokens = (
"COMMA",
"DOUBLE_STAR",
"STAR",
"PERIOD",
"SOLIDUS",
"CARET",
"OPEN_PAREN",
"CLOSE_PAREN",
"FUNCNAME",
"UNIT",
"SIGN",
"UINT",
"UFLOAT",
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
tokens = cls._tokens
t_COMMA = r"\,"
t_STAR = r"\*"
t_PERIOD = r"\."
t_SOLIDUS = r"/"
t_DOUBLE_STAR = r"\*\*"
t_CARET = r"\^"
t_OPEN_PAREN = r"\("
t_CLOSE_PAREN = r"\)"
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r"((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?"
if not re.search(r"[eE\.]", t.value):
t.type = "UINT"
t.value = int(t.value)
elif t.value.endswith("."):
t.type = "UINT"
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r"\d+"
t.value = int(t.value)
return t
def t_SIGN(t):
r"[+-](?=\d)"
t.value = int(t.value + "1")
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r"((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()"
return t
def t_UNIT(t):
"%|([YZEPTGMkhdcmu\N{MICRO SIGN}npfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+"
t.value = cls._get_unit(t)
return t
t_ignore = " "
# Error handling rule
def t_error(t):
raise ValueError(f"Invalid character at col {t.lexpos}")
return parsing.lex(
lextab="generic_lextab", package="astropy/units", reflags=int(re.UNICODE)
)
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
        here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
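        As an illustrative note (added for clarity, not part of the FITS
        text): this generic grammar accepts an arbitrary leading numeric
        factor, e.g. ``"0.5 m"`` or ``"10**3 erg / (cm2 s)"``, whereas the
        ``"fits"`` subclass restricts such factors to powers of ten (see
        ``p_factor_float``, ``p_factor_int`` and ``p_factor_fits`` below).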
"""
tokens = cls._tokens
def p_main(p):
"""
main : unit
| structured_unit
| structured_subunit
"""
if isinstance(p[1], tuple):
                # Unpack a possible StructuredUnit inside a tuple, i.e.,
                # ignore any outermost set of parentheses.
p[0] = p[1][0]
else:
p[0] = p[1]
def p_structured_subunit(p):
"""
structured_subunit : OPEN_PAREN structured_unit CLOSE_PAREN
"""
# We hide a structured unit enclosed by parentheses inside
# a tuple, so that we can easily distinguish units like
# "(au, au/day), yr" from "au, au/day, yr".
p[0] = (p[2],)
def p_structured_unit(p):
"""
structured_unit : subunit COMMA
| subunit COMMA subunit
"""
from astropy.units.structured import StructuredUnit
inputs = (p[1],) if len(p) == 3 else (p[1], p[3])
units = ()
for subunit in inputs:
if isinstance(subunit, tuple):
# Structured unit that should be its own entry in the
# new StructuredUnit (was enclosed in parentheses).
units += subunit
elif isinstance(subunit, StructuredUnit):
# Structured unit whose entries should be
                    # individually added to the new StructuredUnit.
units += subunit.values()
else:
# Regular unit to be added to the StructuredUnit.
units += (subunit,)
p[0] = StructuredUnit(units)
def p_subunit(p):
"""
subunit : unit
| structured_unit
| structured_subunit
"""
p[0] = p[1]
def p_unit(p):
"""
unit : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
"""
from astropy.units.core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
"""
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
"""
from astropy.units.core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
"""
inverse_unit : division unit_expression
"""
p[0] = p[2] ** -1
def p_factor(p):
"""
factor : factor_fits
| factor_float
| factor_int
"""
p[0] = p[1]
def p_factor_float(p):
"""
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
"""
if cls.name == "fits":
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
"""
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
"""
if cls.name == "fits":
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
"""
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power OPEN_PAREN UINT CLOSE_PAREN
| UINT power signed_int
| UINT power UINT
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
"""
if p[1] != 10:
if cls.name == "fits":
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ("**", "^"):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
"""
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
"""
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
"""
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
"""
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
"""
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
"""
paren_expr : sign UINT
| signed_float
| frac
"""
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
"""
frac : sign UINT division sign UINT
"""
p[0] = Fraction(p[1] * p[2], p[4] * p[5])
def p_sign(p):
"""
sign : SIGN
|
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1
def p_product(p):
"""
product : STAR
| PERIOD
"""
pass
def p_division(p):
"""
division : SOLIDUS
"""
pass
def p_power(p):
"""
power : DOUBLE_STAR
| CARET
"""
p[0] = p[1]
def p_signed_int(p):
"""
signed_int : SIGN UINT
"""
p[0] = p[1] * p[2]
def p_signed_float(p):
"""
signed_float : sign UINT
| sign UFLOAT
"""
p[0] = p[1] * p[2]
def p_function_name(p):
"""
function_name : FUNCNAME
"""
p[0] = p[1]
def p_function(p):
"""
function : function_name OPEN_PAREN main CLOSE_PAREN
"""
if p[1] == "sqrt":
p[0] = p[3] ** 0.5
return
elif p[1] in ("mag", "dB", "dex"):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError(f"'{p[1]}' is not a recognized function")
def p_error(p):
raise ValueError()
return parsing.yacc(tabmodule="generic_parsetab", package="astropy/units")
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
registry = core.get_current_unit_registry()
if t.value in registry.aliases:
return registry.aliases[t.value]
raise ValueError(f"At col {t.lexpos}, {str(e)}")
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s in cls._unit_symbols:
s = cls._unit_symbols[s]
elif not s.isascii():
if s[0] == "\N{MICRO SIGN}":
s = "u" + s[1:]
if s[-1] in cls._prefixable_unit_symbols:
s = s[:-1] + cls._prefixable_unit_symbols[s[-1]]
elif len(s) > 1 and s[-1] in cls._unit_suffix_symbols:
s = s[:-1] + cls._unit_suffix_symbols[s[-1]]
elif s.endswith("R\N{INFINITY}"):
s = s[:-2] + "Ry"
if s in registry:
return registry[s]
if detailed_exception:
raise ValueError(f"{s} is not a valid unit. {did_you_mean(s, registry)}")
else:
raise ValueError()
_unit_symbols = {
"%": "percent",
"\N{PRIME}": "arcmin",
"\N{DOUBLE PRIME}": "arcsec",
"\N{MODIFIER LETTER SMALL H}": "hourangle",
"e\N{SUPERSCRIPT MINUS}": "electron",
}
_prefixable_unit_symbols = {
"\N{GREEK CAPITAL LETTER OMEGA}": "Ohm",
"\N{LATIN CAPITAL LETTER A WITH RING ABOVE}": "Angstrom",
"\N{SCRIPT SMALL L}": "l",
}
_unit_suffix_symbols = {
"\N{CIRCLED DOT OPERATOR}": "sun",
"\N{SUN}": "sun",
"\N{CIRCLED PLUS}": "earth",
"\N{EARTH}": "earth",
"\N{JUPITER}": "jupiter",
"\N{LATIN SUBSCRIPT SMALL LETTER E}": "_e",
"\N{LATIN SUBSCRIPT SMALL LETTER P}": "_p",
}
_translations = str.maketrans(
{
"\N{GREEK SMALL LETTER MU}": "\N{MICRO SIGN}",
"\N{MINUS SIGN}": "-",
}
)
"""Character translations that should be applied before parsing a string.
    Note that this explicitly does *not* translate MICRO SIGN to 'u' in
    general, since then a string like 'µ' would be interpreted as the unit
    of mass, 'u'.
"""
_superscripts = (
"\N{SUPERSCRIPT MINUS}"
"\N{SUPERSCRIPT PLUS SIGN}"
"\N{SUPERSCRIPT ZERO}"
"\N{SUPERSCRIPT ONE}"
"\N{SUPERSCRIPT TWO}"
"\N{SUPERSCRIPT THREE}"
"\N{SUPERSCRIPT FOUR}"
"\N{SUPERSCRIPT FIVE}"
"\N{SUPERSCRIPT SIX}"
"\N{SUPERSCRIPT SEVEN}"
"\N{SUPERSCRIPT EIGHT}"
"\N{SUPERSCRIPT NINE}"
)
_superscript_translations = str.maketrans(_superscripts, "-+0123456789")
_regex_superscript = re.compile(f"[{_superscripts}]?[{_superscripts[2:]}]+")
_regex_deg = re.compile("°([CF])?")
@classmethod
def _convert_superscript(cls, m):
return f"({m.group().translate(cls._superscript_translations)})"
@classmethod
def _convert_deg(cls, m):
if len(m.string) == 1:
return "deg"
return m.string.replace("°", "deg_")
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, str):
s = s.decode("ascii")
elif not s.isascii():
# common normalization of unicode strings to avoid
# having to deal with multiple representations of
# the same character. This normalizes to "composed" form
# and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
s = unicodedata.normalize("NFC", s)
# Translate some basic unicode items that we'd like to support on
# input but are not standard.
s = s.translate(cls._translations)
# TODO: might the below be better done in the parser/lexer?
# Translate superscripts to parenthesized numbers; this ensures
# that mixes of superscripts and regular numbers fail.
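        # (Illustrative example: "m s⁻¹" becomes "m s(-1)" here, which the
        # parenthesized numeric_power rule of the grammar then handles.)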
s = cls._regex_superscript.sub(cls._convert_superscript, s)
# Translate possible degrees.
s = cls._regex_deg.sub(cls._convert_deg, s)
result = cls._do_parse(s, debug=debug)
# Check for excess solidi, but exclude fractional exponents (accepted)
n_slashes = s.count("/")
if n_slashes > 1 and (n_slashes - len(re.findall(r"\(\d+/\d+\)", s))) > 1:
warnings.warn(
"'{}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning,
)
return result
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
@classmethod
def _format_unit_list(cls, units):
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
return super()._format_unit_list(units)
# 2023-02-18: The statement in the docstring is no longer true; the class is not
# used anywhere, so it can be safely removed in 6.0.
@deprecated("5.3", alternative="Generic")
class Unscaled(Generic):
"""
A format that doesn't display the scale part of the unit, other
than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
@classmethod
def to_string(cls, unit):
if unit.scale != 1:
unit = core.Unit(unit / unit.scale)
return super().to_string(unit)
|
b3ad6bd98e655436652d2f0a64ac5eb02580e1961abc6db8af2acd626aac097e | # Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
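As an illustrative sketch (a hypothetical ``np.somefunc``, added for clarity
and mirroring the real helpers defined below), a FUNCTION_HELPERS entry for a
unit-invariant one-argument function could look like::
    @function_helper
    def somefunc(a, *args, **kwargs):
        # Strip the unit, let numpy work on a plain ndarray, and
        # re-attach the input's unit to the result.
        return (a.view(np.ndarray),) + args, kwargs, a.unit, None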
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides, "ENABLE_ARRAY_FUNCTION", True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat, np.empty_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.max, np.min, np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.round_, # Alias for np.round in NUMPY_LT_1_25, but deprecated since.
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot,
} # fmt: skip
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue,
} # fmt: skip
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
TBD_FUNCTIONS = {
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
rfn.repack_fields, rfn.stack_arrays,
} # fmt: skip
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS
IGNORED_FUNCTIONS = {
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# functions taking record arrays (which are deprecated)
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
} # fmt: skip
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
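        As used further below in this module, for example (illustrative
        excerpts only)::
            @function_helper
            def sinc(x):
                ...
            @function_helper(helps={np.tril, np.triu})
            def invariant_m_helper(m, *args, **kwargs):
                ...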
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
# fmt: off
@function_helper(
helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh,
}
)
# fmt: on
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop("subok", True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError(
"Can only apply 'sinc' function to quantities with angle units"
)
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(
p.to_value(radian), discont.to_value(radian), axis=axis
)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get("subok", True) else None
return (a.view(np.ndarray), a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask, a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask, values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask, arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask, vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return (dst.view(np.ndarray), dst._to_own_unit(src)) + args, kwargs, None, None
elif isinstance(src, Quantity):
return (dst, src.to_value(dimensionless_unscaled)) + args, kwargs, None, None
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return (
(x.view(np.ndarray),),
        dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
x.unit,
None,
)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
# Note: this should keep the dtype the same
return tuple(Quantity(a, copy=False, subok=True, dtype=None) for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
If unit_from_first, take the unit of the first argument regardless
whether it actually defined a unit (e.g., dimensionless for arrays).
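    For instance (an illustrative sketch), ``_quantities2arrays(1 * u.km, 500 * u.m)``
    would return the plain values ``(1.0, 0.5)`` together with ``u.km``.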
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (
q.unit is q._default_unit and not hasattr(args[0], "unit")
):
# Here, the argument could still be things like [10*u.one, 11.*u.one]),
# i.e., properly dimensionless. So, we only override with anything
# that has a unit not equivalent to dimensionless (fine to ignore other
# dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs["out"] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim, final_size) = np.core.shape_base._block_setup(
arrays
)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values, unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode="constant", **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in "constant_values", "end_values":
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple)
else array._to_own_unit(v)
)
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps=({np.quantile, np.nanquantile}))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop("out", None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
# Allow number without a unit as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equal(*args, equal_nan=equal_nan), None, None
@dispatched_function
def array_equiv(a1, a2):
try:
args, unit = _quantities2arrays(a1, a2)
except UnitConversionError:
return False, None, None
return np.array_equiv(*args), None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(
helps={
np.cross,
np.inner,
np.vdot,
np.tensordot,
np.kron,
np.correlate,
np.convolve,
}
)
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs["out"] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs), dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
def _check_range(range, unit):
range = _as_quantity(range)
range = range.to_value(unit)
return range
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if range is not None:
range = _check_range(range, a.unit)
if density:
unit = (unit or 1) / a.unit
return (
(a.value, bins, range),
{"weights": weights, "density": density},
(unit, a.unit),
None,
)
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if range is not None:
range = _check_range(range, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if range is not None:
range = tuple(
_check_range(r, unit) for (r, unit) in zip(range, (x.unit, y.unit))
)
if density:
unit = (unit or 1) / x.unit / y.unit
return (
(x.value, y.value, bins, range),
{"weights": weights, "density": density},
(unit, x.unit, y.unit),
None,
)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
"The dimension of bins must be equal to the dimension of the sample x."
)
bins = [_check_bins(b, unit) for (b, unit) in zip(bins, sample_units)]
if range is not None:
range = tuple(_check_range(r, unit) for (r, unit) in zip(range, sample_units))
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return (
(sample, bins, range),
{"weights": weights, "density": density},
(unit, sample_units),
None,
)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get("axis", None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if not isinstance(start, LogQuantity) or not isinstance(stop, LogQuantity):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(
ar, return_index=False, return_inverse=False, return_counts=False, axis=None
):
unit = ar.unit
n_index = sum(bool(i) for i in (return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts, axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
# This tests whether ar1 is in ar2, so we should change the unit of
# a1 to that of a2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = "_" * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls), *args, **kwargs).replace(
fake_name, cls_name
)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition("dtype")
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get("formatter", None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if "numpy" in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
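# Sketch of the visible effect (illustrative only): without a formatter the
# value view is printed, so the resulting string carries no unit.
def _example_array2string():
    from astropy import units as u
    assert np.array2string(np.arange(3.0) * u.m) == "[0. 1. 2.]"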
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
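# Illustrative sketch (assumes __array_function__ dispatch): the unit is simply
# reattached to the extracted or constructed diagonal.
def _example_diag():
    from astropy import units as u
    assert np.all(np.diag([1.0, 2.0] * u.m) == [[1.0, 0.0], [0.0, 2.0]] * u.m)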
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian), {}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def det(a):
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return (
(a.view(np.ndarray), b.view(np.ndarray)) + args,
kwargs,
b.unit / a.unit,
None,
)
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return (
(a.view(np.ndarray), b.view(np.ndarray), rcond),
{},
(b.unit / a.unit, b.unit**2, None, a.unit),
None,
)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord) + args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit**n, None
@function_helper(module=np.linalg)
def cholesky(a):
return (a.value,), {}, a.unit**0.5, None
@function_helper(module=np.linalg)
def qr(a, mode="reduced"):
if mode.startswith("e"):
units = None
elif mode == "r":
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,) + args, kwargs, (a.unit, dimensionless_unscaled), None
# ======================= np.lib.recfunctions =======================
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
"""
Convert a structured quantity to an unstructured one.
This only works if all the units are compatible.
"""
from astropy.units import StructuredUnit
target_unit = arr.unit.values()[0]
def replace_unit(x):
if isinstance(x, StructuredUnit):
return x._recursively_apply(replace_unit)
else:
return target_unit
to_unit = arr.unit._recursively_apply(replace_unit)
return (arr.to_value(to_unit),) + args, kwargs, target_unit, None
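# A sketch of the intended behaviour (illustrative; assumes a structured
# Quantity can be built by passing a StructuredUnit to Quantity, and that all
# field units are convertible): every field is expressed in the first field's unit.
def _example_structured_to_unstructured():
    import numpy.lib.recfunctions as rfn
    from astropy import units as u
    sa = np.array([(1.0, 2.0)], dtype=[("x", "f8"), ("y", "f8")])
    sq = u.Quantity(sa, unit=u.StructuredUnit((u.m, u.km)))
    assert np.all(rfn.structured_to_unstructured(sq) == [[1.0, 2000.0]] * u.m)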
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype.
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
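# Quick sketch of the recursion above (illustrative only): a flat structured
# dtype maps every field onto the single given unit.
def _example_build_structured_unit():
    from astropy import units as u
    dt = np.dtype([("x", "f8"), ("y", "f8")])
    assert _build_structured_unit(dt, u.m) == (u.m, u.m)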
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
from astropy.units import StructuredUnit
target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))
return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
def _izip_units_flat(iterable):
"""Return an iterator over the units, collapsing any nested unit structure.
Parameters
----------
iterable : Iterable[StructuredUnit | Unit] or StructuredUnit
A structured unit or iterable thereof.
Yields
------
unit
"""
from astropy.units import StructuredUnit
# Make Structured unit (pass-through if it is already).
units = StructuredUnit(iterable)
# Yield from structured unit.
for v in units.values():
if isinstance(v, StructuredUnit):
yield from _izip_units_flat(v)
else:
yield v
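# Sketch (assumes StructuredUnit accepts a nested tuple of units): any nesting
# is flattened into a plain sequence of component units.
def _example_izip_units_flat():
    from astropy.units import StructuredUnit, kg, m, s
    su = StructuredUnit((m, (s, kg)))
    assert list(_izip_units_flat(su)) == [m, s, kg]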
@function_helper(helps=rfn.merge_arrays)
def merge_arrays(
seqarrays,
fill_value=-1,
flatten=False,
usemask=False,
asrecarray=False,
):
"""Merge structured Quantities field by field.
Like :func:`numpy.lib.recfunctions.merge_arrays`. Note that ``usemask`` and
``asrecarray`` are not supported at this time and will raise a ValueError if
not `False`.
"""
from astropy.units import Quantity, StructuredUnit
if asrecarray:
# TODO? implement if Quantity ever supports rec.array
raise ValueError("asrecarray=True is not supported.")
if usemask:
# TODO: use MaskedQuantity for this case
raise ValueError("usemask=True is not supported.")
# Do we have a single Quantity as input?
if isinstance(seqarrays, Quantity):
seqarrays = (seqarrays,)
# Note: this also converts ndarray -> Quantity[dimensionless]
seqarrays = _as_quantities(*seqarrays)
arrays = tuple(q.value for q in seqarrays)
units = tuple(q.unit for q in seqarrays)
if flatten:
unit = StructuredUnit(tuple(_izip_units_flat(units)))
elif len(arrays) == 1:
unit = StructuredUnit(units[0])
else:
unit = StructuredUnit(units)
return (
(arrays,),
dict(
fill_value=fill_value,
flatten=flatten,
usemask=usemask,
asrecarray=asrecarray,
),
unit,
None,
)
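# Usage sketch for the helper above (illustrative; assumes __array_function__
# dispatch): two flat Quantities merge into one structured Quantity whose unit
# is a StructuredUnit built from the inputs.
def _example_merge_arrays():
    import numpy.lib.recfunctions as rfn
    from astropy import units as u
    merged = rfn.merge_arrays(([1.0, 2.0] * u.m, [3.0, 4.0] * u.s))
    assert merged.unit == u.StructuredUnit((u.m, u.s))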
|
b95a3845529e6e6ff3835cc9dc398f3b694e842a296ebfe5054bdfdcc0b39715 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import warnings
from contextlib import nullcontext
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.constants import si
from astropy.units import PrefixUnit, Unit, UnitBase, UnitsWarning, dex
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize(
"strings, unit",
[
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m**2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m**-3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m**1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
# This is the VOUnits documentation, but doesn't seem to follow the
# unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m**0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
],
)
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string", ["sin( /pixel /s)", "mag(mag)", "dB(dB(mW))", "dex()"]
)
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize(
"strings, unit",
[
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm**2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm**2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["km/s/Mpc"], u.km / u.s / u.Mpc),
(["km/(s.Mpc)"], u.km / u.s / u.Mpc),
(["10+3J/m/s/kpc2"], u.Unit(1e3 * u.W / (u.m * u.kpc**2))),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11/m"], u.Unit(1.5e11 / u.m)),
(["/s"], u.s**-1),
(["m2"], u.m**2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.0e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.0e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2**30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h),
(["[cm/s2]"], dex(u.cm / u.s**2)),
(["[K]"], dex(u.K)),
(["[-]"], dex(u.dimensionless_unscaled)),
],
)
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string",
[
"0.1 nm",
"solMass(3/2)",
"km / s",
"km s-1",
"km/s.Mpc-1",
"/s.Mpc",
"pix0.1nm",
"pix/(0.1nm)",
"km*s",
"km**2",
"5x8+3m",
"0.1---",
"---m",
"m---",
"--",
"0.1-",
"-m",
"m-",
"mag(s-1)",
"dB(mW)",
"dex(cm s-2)",
"[--]",
],
)
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
def test_cds_dimensionless():
assert u.Unit("---", format="cds") == u.dimensionless_unscaled
assert u.dimensionless_unscaled.to_string(format="cds") == "---"
def test_cds_log10_dimensionless():
assert u.Unit("[-]", format="cds") == u.dex(u.dimensionless_unscaled)
assert u.dex(u.dimensionless_unscaled).to_string(format="cds") == "[-]"
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize(
"strings, unit",
[
(
["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s,
),
(
["/pixel /s", "/(pixel * s)"],
(u.pixel * u.s) ** -1,
),
(
[
"count /m**2 /s /eV",
"count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)",
],
u.count * u.m**-2 * u.s**-1 * u.eV**-1,
),
(
["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel),
),
(
["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rule
# against raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom),
),
(
[
"10**(46) erg /s",
"10**46 erg /s",
"10**(39) J /s",
"10**(39) W",
"10**(15) YW",
"YJ /fs",
],
10**46 * u.erg / u.s,
),
(
[
"10**(-7) J /cm**2 /MeV",
"10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)",
"nJ /m**2 /eV",
],
10**-7 * u.J * u.cm**-2 * u.MeV**-1,
),
(
[
"sqrt(erg /pixel /s /GHz)",
"(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)",
],
(u.erg * u.pixel**-1 * u.s**-1 * u.GHz**-1) ** 0.5,
),
(
[
"(count /s) (/pixel /s)",
"(count /s) * (/pixel /s)",
"count /pixel /s**2",
],
(u.count / u.s) * (1.0 / (u.pixel * u.s)),
),
],
)
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize(
"string",
[
"log(photon /m**2 /s /Hz)",
"sin( /pixel /s)",
"log(photon /cm**2 /s /Hz) /(sin( /pixel /s))",
"log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)",
"dB(mW)",
"dex(cm/s**2)",
],
)
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
class RoundtripBase:
deprecated_units = set()
def check_roundtrip(self, unit, output_format=None):
if output_format is None:
output_format = self.format_
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Same warning shows up multiple times
s = unit.to_string(output_format)
if s in self.deprecated_units:
with pytest.warns(UnitsWarning, match="deprecated") as w:
a = Unit(s, format=self.format_)
assert len(w) == 1
else:
a = Unit(s, format=self.format_) # No warning
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
def check_roundtrip_decompose(self, unit):
ud = unit.decompose()
s = ud.to_string(self.format_)
assert " " not in s
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, ud.scale, rtol=1e-5)
class TestRoundtripGeneric(RoundtripBase):
format_ = "generic"
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u.__dict__.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
self.check_roundtrip(unit, output_format="unicode")
self.check_roundtrip_decompose(unit)
class TestRoundtripVOUnit(RoundtripBase):
format_ = "vounit"
deprecated_units = u_format.VOUnit._deprecated_units
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.VOUnit._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit not in (u.mag, u.dB):
self.check_roundtrip_decompose(unit)
class TestRoundtripFITS(RoundtripBase):
format_ = "fits"
deprecated_units = u_format.Fits._deprecated_units
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.Fits._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
class TestRoundtripCDS(RoundtripBase):
format_ = "cds"
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.CDS._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
self.check_roundtrip(unit)
if unit == u.mag:
# Skip mag: decomposes into dex, which is unknown to CDS.
return
self.check_roundtrip_decompose(unit)
@pytest.mark.parametrize(
"unit", [u.dex(unit) for unit in (u.cm / u.s**2, u.K, u.Lsun)]
)
def test_roundtrip_dex(self, unit):
string = unit.to_string(format="cds")
recovered = u.Unit(string, format="cds")
assert recovered == unit
class TestRoundtripOGIP(RoundtripBase):
format_ = "ogip"
deprecated_units = u_format.OGIP._deprecated_units | {"d"}
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.OGIP._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
)
def test_roundtrip(self, unit):
if str(unit) in ("d", "0.001 Crab"):
# Special-case day, which gets auto-converted to hours, and mCrab,
# which the default check does not recognize as a deprecated unit.
with pytest.warns(UnitsWarning):
s = unit.to_string(self.format_)
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
else:
self.check_roundtrip(unit)
if str(unit) in ("mag", "byte", "Crab"):
# Skip mag and byte, which decompose into dex and bit, resp.,
# both of which are unknown to OGIP, as well as Crab, which does
# not decompose, and thus gives a deprecated unit warning.
return
power_of_ten = np.log10(unit.decompose().scale)
if abs(power_of_ten - round(power_of_ten)) > 1e-3:
ctx = pytest.warns(UnitsWarning, match="power of 10")
elif str(unit) == "0.001 Crab":
ctx = pytest.warns(UnitsWarning, match="deprecated")
else:
ctx = nullcontext()
with ctx:
self.check_roundtrip_decompose(unit)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from astropy.units import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string("latex") == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_new_style_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert f"{fluxunit:latex}" == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_latex_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex = r"$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$"
assert fluxunit.to_string("latex") == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex_inline = r"$\mathrm{1 \times 10^{-24}\,erg\,Hz^{-1}\,s^{-1}\,cm^{-2}}$"
assert fluxunit.to_string("latex_inline") == latex_inline
@pytest.mark.parametrize(
"format_spec, string, decomposed",
[
("generic", "erg / (Angstrom cm2 s)", "1e+07 kg / (m s3)"),
("s", "erg / (Angstrom cm2 s)", "1e+07 kg / (m s3)"),
("console", "erg Angstrom^-1 s^-1 cm^-2", "10000000 kg m^-1 s^-3"),
(
"latex",
r"$\mathrm{\frac{erg}{\mathring{A}\,s\,cm^{2}}}$",
r"$\mathrm{10000000\,\frac{kg}{m\,s^{3}}}$",
),
(
"latex_inline",
r"$\mathrm{erg\,\mathring{A}^{-1}\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{10000000\,kg\,m^{-1}\,s^{-3}}$",
),
("unicode", "erg Å⁻¹ s⁻¹ cm⁻²", "10000000 kg m⁻¹ s⁻³"),
(">25s", " erg / (Angstrom cm2 s)", " 1e+07 kg / (m s3)"),
("cds", "erg.Angstrom-1.s-1.cm-2", "10000000kg.m-1.s-3"),
("ogip", "10 erg / (cm**2 nm s)", "1e+07 kg / (m s**3)"),
("fits", "Angstrom-1 cm-2 erg s-1", "10**7 kg m-1 s-3"),
("vounit", "Angstrom**-1.cm**-2.erg.s**-1", "10000000kg.m**-1.s**-3"),
# TODO: make fits and vounit less awful!
],
)
def test_format_styles(format_spec, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s * u.Angstrom)
if format_spec == "vounit":
# erg is deprecated in vounit.
with pytest.warns(UnitsWarning, match="deprecated"):
formatted = format(fluxunit, format_spec)
else:
formatted = format(fluxunit, format_spec)
assert formatted == string
# Decomposed mostly to test that scale factors are dealt with properly
# in the various formats.
assert format(fluxunit.decompose(), format_spec) == decomposed
@pytest.mark.parametrize(
"format_spec, fraction, string, decomposed",
[
("generic", False, "cm-2 erg s-1", "0.001 kg s-3"),
(
"console",
"multiline",
" erg \n------\ns cm^2",
" kg \n0.001 ---\n s^3",
),
("console", "inline", "erg / (s cm^2)", "0.001 kg / s^3"),
("unicode", "multiline", " erg \n─────\ns cm²", " kg\n0.001 ──\n s³"),
("unicode", "inline", "erg / (s cm²)", "0.001 kg / s³"),
(
"latex",
False,
r"$\mathrm{erg\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{0.001\,kg\,s^{-3}}$",
),
(
"latex",
"inline",
r"$\mathrm{erg / (s\,cm^{2})}$",
r"$\mathrm{0.001\,kg / s^{3}}$",
),
# TODO: make generic with fraction=False less awful!
],
)
def test_format_styles_non_default_fraction(format_spec, fraction, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string(format_spec, fraction=fraction) == string
assert fluxunit.decompose().to_string(format_spec, fraction=fraction) == decomposed
@pytest.mark.parametrize("format_spec", ["generic", "cds", "fits", "ogip", "vounit"])
def test_no_multiline_fraction(format_spec):
fluxunit = u.W / u.m**2
with pytest.raises(ValueError, match="only supports.*not fraction='multiline'"):
fluxunit.to_string(format_spec, fraction="multiline")
@pytest.mark.parametrize(
"format_spec",
["generic", "cds", "fits", "ogip", "vounit", "latex", "console", "unicode"],
)
def test_unknown_fraction_style(format_spec):
fluxunit = u.W / u.m**2
with pytest.raises(ValueError, match="only supports.*parrot"):
fluxunit.to_string(format_spec, fraction="parrot")
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string("fits") == "erg Hz-1"
myunit2 = myunit * u.bit**3
assert myunit2.to_string("fits") == "bit3 erg Hz-1"
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string("fits")
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string("console")
@pytest.mark.parametrize(
"format,string",
[
("generic", "10"),
("console", "10"),
("unicode", "10"),
("cds", "10"),
("latex", r"$\mathrm{10}$"),
],
)
def test_scale_only(format, string):
unit = u.Unit(10)
assert unit.to_string(format) == string
def test_flexible_float():
assert u.min._represents.to_string("latex") == r"$\mathrm{60\,s}$"
def test_fits_to_string_function_error():
"""Test function raises TypeError on bad input.
It should raise rather than return None; see gh-11825.
"""
with pytest.raises(TypeError, match="unit argument must be"):
u_format.Fits.to_string(None)
def test_fraction_repr():
area = u.cm**2.0
assert "." not in area.to_string("latex")
fractional = u.cm**2.5
assert "5/2" in fractional.to_string("latex")
assert fractional.to_string("unicode") == "cm⁵⸍²"
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3.0 * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit("%") == u.percent == u.Unit(0.01)
assert u.Unit("%", format="cds") == u.Unit(0.01)
assert u.Unit(0.01).to_string("cds") == "%"
with pytest.raises(ValueError):
u.Unit("%", format="fits")
with pytest.raises(ValueError):
u.Unit("%", format="vounit")
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit("0.1") == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit("1.e-4") == u.Unit(1.0e-4)
assert u.Unit("10-4", format="cds") == u.Unit(1.0e-4)
assert u.Unit("10+8").to_string("cds") == "10+8"
with pytest.raises(ValueError):
u.Unit(0.15).to_string("fits")
assert u.Unit(0.1).to_string("fits") == "10**-1"
with pytest.raises(ValueError):
u.Unit(0.1).to_string("vounit")
def test_deprecated_did_you_mean_units():
with pytest.raises(ValueError) as exc_info:
u.Unit("ANGSTROM", format="fits")
assert "Did you mean Angstrom or angstrom?" in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
u.Unit("crab", format="ogip")
assert "Crab (deprecated)" in str(exc_info.value)
assert "mCrab (deprecated)" in str(exc_info.value)
with pytest.warns(
UnitsWarning,
match=r".* Did you mean 0\.1nm, Angstrom "
r"\(deprecated\) or angstrom \(deprecated\)\?",
) as w:
u.Unit("ANGSTROM", format="vounit")
assert len(w) == 1
assert str(w[0].message).count("0.1nm") == 1
with pytest.warns(UnitsWarning, match=r".* 0\.1nm\.") as w:
u.Unit("angstrom", format="vounit")
assert len(w) == 1
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError), warnings.catch_warnings():
# ct, dex also raise warnings - irrelevant here.
warnings.simplefilter("ignore")
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
assert u.Unit("KiB", format="vounit") == u.Unit("1024 B")
assert u.Unit("Kibyte", format="vounit") == u.Unit("1024 B")
assert u.Unit("Kibit", format="vounit") == u.Unit("1024 bit")
with pytest.warns(UnitsWarning) as w:
u.Unit("kibibyte", format="vounit")
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit("unknown", format="vounit") is None
assert u.Unit("UNKNOWN", format="vounit") is None
assert u.Unit("", format="vounit") is u.dimensionless_unscaled
def test_vounit_details():
with pytest.warns(UnitsWarning, match="deprecated") as w:
assert u.Unit("Pa", format="vounit") is u.Pascal
assert len(w) == 1
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string("vounit") == "10m"
assert u.Unit("dam dag").to_string("vounit") == "100g.m"
# Parse round-trip
with pytest.warns(UnitsWarning, match="deprecated"):
flam = u.erg / u.cm / u.cm / u.s / u.AA
x = u.format.VOUnit.to_string(flam)
assert x == "Angstrom**-1.cm**-2.erg.s**-1"
new_flam = u.format.VOUnit.parse(x)
assert new_flam == flam
@pytest.mark.parametrize(
"unit, vounit, number, scale, voscale",
[
("nm", "nm", 0.1, "10^-1", "0.1"),
("fm", "fm", 100.0, "10+2", "100"),
("m^2", "m**2", 100.0, "100.0", "100"),
("cm", "cm", 2.54, "2.54", "2.54"),
("kg", "kg", 1.898124597e27, "1.898124597E27", "1.8981246e+27"),
("m/s", "m.s**-1", 299792458.0, "299792458", "2.9979246e+08"),
("cm2", "cm**2", 1.0e-20, "10^(-20)", "1e-20"),
],
)
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
x = u.Unit(f"{scale} {unit}")
assert x == number * u.Unit(unit)
assert x.to_string(format="vounit") == voscale + vounit
def test_vounit_custom():
x = u.Unit("'foo' m", format="vounit")
x_vounit = x.to_string("vounit")
assert x_vounit == "'foo'.m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format="vounit")
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string("vounit")
assert x_vounit == "m.m'foo'"
x_string = x.to_string()
assert x_string == "m mfoo"
def test_vounit_implicit_custom():
# Yikes, this becomes "femto-urlong"... But at least there's a warning.
with pytest.warns(UnitsWarning) as w:
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == "urlong"
assert len(w) == 2
assert "furlong" in str(w[0].message)
assert "week" in str(w[1].message)
@pytest.mark.parametrize(
"scale, number, string",
[
("10+2", 100, "10**2"),
("10(+2)", 100, "10**2"),
("10**+2", 100, "10**2"),
("10**(+2)", 100, "10**2"),
("10^+2", 100, "10**2"),
("10^(+2)", 100, "10**2"),
("10**2", 100, "10**2"),
("10**(2)", 100, "10**2"),
("10^2", 100, "10**2"),
("10^(2)", 100, "10**2"),
("10-20", 10 ** (-20), "10**-20"),
("10(-20)", 10 ** (-20), "10**-20"),
("10**-20", 10 ** (-20), "10**-20"),
("10**(-20)", 10 ** (-20), "10**-20"),
("10^-20", 10 ** (-20), "10**-20"),
("10^(-20)", 10 ** (-20), "10**-20"),
],
)
def test_fits_scale_factor(scale, number, string):
x = u.Unit(scale + " erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " Angstrom-1 cm-2 erg s-1"
x = u.Unit(scale + "*erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " Angstrom-1 cm-2 erg s-1"
def test_fits_scale_factor_errors():
with pytest.raises(ValueError):
x = u.Unit("1000 erg/(s cm**2 Angstrom)", format="fits")
with pytest.raises(ValueError):
x = u.Unit("12 erg/(s cm**2 Angstrom)", format="fits")
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format="fits")
x = u.Unit(100.0 * u.erg)
assert x.to_string(format="fits") == "10**2 erg"
def test_double_superscript():
"""Regression test for #5870, #8699, #9218; avoid double superscripts."""
assert (u.deg).to_string("latex") == r"$\mathrm{{}^{\circ}}$"
assert (u.deg**2).to_string("latex") == r"$\mathrm{deg^{2}}$"
assert (u.arcmin).to_string("latex") == r"$\mathrm{{}^{\prime}}$"
assert (u.arcmin**2).to_string("latex") == r"$\mathrm{arcmin^{2}}$"
assert (u.arcsec).to_string("latex") == r"$\mathrm{{}^{\prime\prime}}$"
assert (u.arcsec**2).to_string("latex") == r"$\mathrm{arcsec^{2}}$"
assert (u.hourangle).to_string("latex") == r"$\mathrm{{}^{h}}$"
assert (u.hourangle**2).to_string("latex") == r"$\mathrm{hourangle^{2}}$"
assert (u.electron).to_string("latex") == r"$\mathrm{e^{-}}$"
assert (u.electron**2).to_string("latex") == r"$\mathrm{electron^{2}}$"
def test_no_prefix_superscript():
"""Regression test for gh-911 and #14419."""
assert u.mdeg.to_string("latex") == r"$\mathrm{mdeg}$"
assert u.narcmin.to_string("latex") == r"$\mathrm{narcmin}$"
assert u.parcsec.to_string("latex") == r"$\mathrm{parcsec}$"
assert u.mdeg.to_string("unicode") == "mdeg"
assert u.narcmin.to_string("unicode") == "narcmin"
assert u.parcsec.to_string("unicode") == "parcsec"
@pytest.mark.parametrize(
"power,expected",
(
(1.0, "m"),
(2.0, "m2"),
(-10, "1 / m10"),
(1.5, "m(3/2)"),
(2 / 3, "m(2/3)"),
(7 / 11, "m(7/11)"),
(-1 / 64, "1 / m(1/64)"),
(1 / 100, "m(1/100)"),
(2 / 101, "m(0.019801980198019802)"),
(Fraction(2, 101), "m(2/101)"),
),
)
def test_powers(power, expected):
"""Regression test for #9279 - powers should not be oversimplified."""
unit = u.m**power
s = unit.to_string()
assert s == expected
assert unit == s
@pytest.mark.parametrize(
"string,unit",
[
("\N{MICRO SIGN}g", u.microgram),
("\N{GREEK SMALL LETTER MU}g", u.microgram),
("g\N{MINUS SIGN}1", u.g ** (-1)),
("m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", 1 / u.m),
("m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", u.m / u.s),
("m\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT THREE}", u.m**3),
("m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}", u.m**10),
("\N{GREEK CAPITAL LETTER OMEGA}", u.ohm),
("\N{OHM SIGN}", u.ohm), # deprecated but for compatibility
("\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}", u.microOhm),
("\N{ANGSTROM SIGN}", u.Angstrom),
("\N{ANGSTROM SIGN} \N{OHM SIGN}", u.Angstrom * u.Ohm),
("\N{LATIN CAPITAL LETTER A WITH RING ABOVE}", u.Angstrom),
("\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}", u.Angstrom),
("m\N{ANGSTROM SIGN}", u.milliAngstrom),
("°C", u.deg_C),
("°", u.deg),
("M⊙", u.Msun), # \N{CIRCLED DOT OPERATOR}
("L☉", u.Lsun), # \N{SUN}
("M⊕", u.Mearth), # normal earth symbol = \N{CIRCLED PLUS}
("M♁", u.Mearth), # be generous with \N{EARTH}
("R♃", u.Rjup), # \N{JUPITER}
("′", u.arcmin), # \N{PRIME}
("R∞", u.Ry),
("Mₚ", u.M_p),
],
)
def test_unicode(string, unit):
assert u_format.Generic.parse(string) == unit
assert u.Unit(string) == unit
@pytest.mark.parametrize(
"string",
[
"g\N{MICRO SIGN}",
"g\N{MINUS SIGN}",
"m\N{SUPERSCRIPT MINUS}1",
"m+\N{SUPERSCRIPT ONE}",
"m\N{MINUS SIGN}\N{SUPERSCRIPT ONE}",
"k\N{ANGSTROM SIGN}",
],
)
def test_unicode_failures(string):
with pytest.raises(ValueError):
u.Unit(string)
@pytest.mark.parametrize("format_", ("unicode", "latex", "latex_inline"))
def test_parse_error_message_for_output_only_format(format_):
with pytest.raises(NotImplementedError, match="not parse"):
u.Unit("m", format=format_)
def test_unknown_parser():
with pytest.raises(ValueError, match=r"Unknown.*unicode'\] for output only"):
u.Unit("m", format="foo")
def test_celsius_fits():
assert u.Unit("Celsius", format="fits") == u.deg_C
assert u.Unit("deg C", format="fits") == u.deg_C
# Check compound parsing: "deg C" is read as degree * coulomb, not Celsius.
assert u.Unit("deg C kg-1", format="fits") == u.C * u.deg / u.kg
assert u.Unit("Celsius kg-1", format="fits") == u.deg_C / u.kg
assert u.deg_C.to_string("fits") == "Celsius"
@pytest.mark.parametrize(
"format_spec, string",
[
("generic", "dB(1 / m)"),
("latex", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{\frac{1}{m}} \right)}$"),
("latex_inline", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{m^{-1}} \right)}$"),
("console", "dB(m^-1)"),
("unicode", "dB(m⁻¹)"),
],
)
def test_function_format_styles(format_spec, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec) == string
assert f"{dbunit:{format_spec}}" == string
@pytest.mark.parametrize(
"format_spec, fraction, string",
[
("console", "multiline", " 1\ndB(-)\n m"),
("console", "inline", "dB(1 / m)"),
("unicode", "multiline", " 1\ndB(─)\n m"),
("unicode", "inline", "dB(1 / m)"),
("latex", False, r"$\mathrm{dB}$$\mathrm{\left( \mathrm{m^{-1}} \right)}$"),
("latex", "inline", r"$\mathrm{dB}$$\mathrm{\left( \mathrm{1 / m} \right)}$"),
],
)
def test_function_format_styles_non_default_fraction(format_spec, fraction, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec, fraction=fraction) == string
|
766688c4d7c8479efcd551b32e8fe1fb57a6b2e05059a755538e0e5b82a7d56f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24, NUMPY_LT_1_25
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith("_"):
continue
if callable(f) and hasattr(f, "__wrapped__"):
wrapped_functions[name] = f
return wrapped_functions
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
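# Quick illustration (a sketch; assumes a numpy build whose public functions
# carry ``__wrapped__``, i.e. __array_function__ dispatch is available):
# familiar names such as ``concatenate`` appear among the candidates.
def _example_wrapped_functions():
    assert "concatenate" in all_wrapped_functions
    assert np.concatenate in all_wrapped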
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
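# Small sketch of the mechanism ("_Demo" is a hypothetical class, not part of
# the test suite): defining a method named ``test_<numpy function>`` is enough
# for the metaclass to record that numpy function as covered.
def _example_coverage_meta():
    CoverageMeta("_Demo", (), {"test_reshape": lambda self: None})
    assert all_wrapped_functions["reshape"] in CoverageMeta.covered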
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_max(self):
self.check(np.max)
def test_min(self):
self.check(np.min)
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round)
def test_round_(self):
if NUMPY_LT_1_25:
self.check(np.round_)
else:
with pytest.warns(
DeprecationWarning, match="`round_` is deprecated as of NumPy 1.25.0"
):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
# With an axis argument, a plain ndarray of integer counts is returned.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.parametrize("equal_nan", [False, True])
def test_array_equal_nan(self, equal_nan):
q1 = np.linspace(0, 1, num=11) * u.m
q1[0] = np.nan
q2 = q1.to(u.cm)
result = np.array_equal(q1, q2, equal_nan=equal_nan)
assert result == equal_nan
def test_array_equal_incompatible_units(self):
assert not np.array_equal([1, 2] * u.m, [1, 2] * u.s)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
def test_array_equiv_incompatible_units(self):
assert not np.array_equiv([1, 1] * u.m, [1] * u.s)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
        # Note: linspace gets the unit of the end point; not entirely logical,
        # but that is the behaviour.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
        # Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@classmethod
def _range_value(cls, range, unit):
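        # Convert a (possibly nested) Quantity range to plain values in
        # ``unit``; e.g. [2 * u.m, 500 * u.cm] with u.m gives [2.0, 5.0].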
if isinstance(range, u.Quantity):
return range.to_value(unit)
else:
return [cls._range_value(r, unit) for r in range]
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_range(self, range):
self.check(
np.histogram,
self.x,
range=range,
value_args=[self.x.value],
value_kwargs=dict(range=self._range_value(range, self.x.unit)),
expected_units=(None, self.x.unit),
)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_bin_edges_range(self, range):
out_b = np.histogram_bin_edges(self.x, range=range)
expected_b = np.histogram_bin_edges(
self.x.value, range=self._range_value(range, self.x.unit)
)
assert np.all(out_b.value == expected_b)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogram2d_range(self, range):
self.check(
np.histogram2d,
self.x,
self.y,
range=range,
value_args=[self.x.value, self.y.value],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, self.x.unit, self.y.unit),
)
@needs_array_function
def test_histogramdd(self):
        # First, replicate the histogram2d tests, but using the
        # histogramdd override. histogramdd normally takes the sample as a
        # tuple with a given number of dimensions, and returns the histogram
        # as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogramdd_range(self, range):
self.check(
np.histogramdd,
(self.x, self.y),
range=range,
value_args=[(self.x.value, self.y.value)],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, (self.x.unit, self.y.unit)),
)
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
        # Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
    # For these, making the behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably. So, might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
    # float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
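    # check1/check2 feed self.q (and self.q2) through ``check``; tests that
    # expect a plain (unitless) result, such as test_in1d and test_isin below,
    # pass ``unit=None`` so no unit is attached to the expected output.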
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0],
[ 0.0, 3.0, -1.0],
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
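        # Note: this matrix is invertible (determinant 7), so the inv, solve
        # and det tests below are well defined.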
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
            assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
        # For the other tests of ``unstructured_to_structured``, see
        # ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
# Fails because cannot create quantity from structured array.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
    # untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
# The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import dataclasses
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.optional_deps import HAS_SCIPY
testcase = namedtuple("testcase", ["f", "q_in", "q_out"])
testexc = namedtuple("testexc", ["f", "q_in", "exc", "msg"])
testwarn = namedtuple("testwarn", ["f", "q_in", "wfilter"])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful of the following line, would break on a function returning
# a single tuple (as opposed to tuple of return values)
results = (results,) if not isinstance(results, tuple) else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
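# For reference, the helpers above are used via the parametrized tests below
# (see TestQuantityTrigonometricFuncs): an entry such as
#     testcase(f=np.sin, q_in=(30.0 * u.degree,),
#              q_out=(0.5 * u.dimensionless_unscaled,))
# is forwarded to ``test_testcase``, which calls ``f(*q_in)`` and checks the
# unit and value of each output against ``q_out``.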
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = {
ufunc
for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = {
ufunc
for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
assert all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert "scipy.special" in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
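        # Setting a ufunc's helper to None marks it as unsupported; assigning
        # a real helper (here the one for np.subtract) makes it supported
        # again. Check both directions.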
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
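        # Guard against a race in lazy helper registration: many threads look
        # up the helper for a just-registered ufunc and all of them should end
        # up with the same registered helper function.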
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
"astropy.units.tests.test_quantity_ufuncs",
["dummy_ufunc"],
register,
)
futures = [
executor.submit(lambda: helpers[dummy_ufunc])
for i in range(workers)
]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize(
"tc",
(
testcase(
f=np.sin,
q_in=(30.0 * u.degree,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.sin,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([0.0, 1.0 / np.sqrt(2.0), 1.0]) * u.one,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(30.0 * u.degree),),
q_out=(np.radians(30.0) * u.radian,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.cos,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.cos,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([1.0, 1.0 / np.sqrt(2.0), 0.0]) * u.one,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.tan,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(np.sqrt(3.0) * u.dimensionless_unscaled,),
),
testcase(
f=np.tan,
q_in=(np.array([0.0, 45.0, 135.0, 180.0]) * u.degree,),
q_out=(np.array([0.0, 1.0, -1.0, 0.0]) * u.dimensionless_unscaled,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
q_out=(np.radians(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
),
testcase(
f=np.arctan2,
q_in=(np.array([10.0, 30.0, 70.0, 80.0]) * u.m, 2.0 * u.km),
q_out=(
np.arctan2(np.array([10.0, 30.0, 70.0, 80.0]), 2000.0) * u.radian,
),
),
testcase(
f=np.arctan2,
q_in=((np.array([10.0, 80.0]) * u.m / (2.0 * u.km)).to(u.one), 1.0),
q_out=(np.arctan2(np.array([10.0, 80.0]) / 2000.0, 1.0) * u.radian,),
),
testcase(f=np.deg2rad, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.radians, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.deg2rad, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.radians, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.rad2deg, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.degrees, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.rad2deg, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
testcase(f=np.degrees, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
),
)
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize(
"te",
(
testexc(f=np.deg2rad, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.radians, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.rad2deg, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.degrees, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(
f=np.sin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units",
),
testexc(
f=np.arcsin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities",
),
testexc(
f=np.cos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units",
),
testexc(
f=np.arccos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities",
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units",
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0 * u.s),
exc=u.UnitsError,
msg="compatible dimensions",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0),
exc=u.UnitsError,
msg="dimensionless quantities when other arg",
),
),
)
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize(
"tw",
(testwarn(f=np.arcsin, q_in=(27.0 * u.pc / (15 * u.kpc),), wfilter="error"),),
)
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4.0 * u.m, 2.0 / u.s) == 8.0 * u.m / u.s
assert np.multiply(4.0 * u.m, 2.0) == 8.0 * u.m
assert np.multiply(4.0, 2.0 / u.s) == 8.0 / u.s
def test_multiply_array(self):
assert np.all(
np.multiply(np.arange(3.0) * u.m, 2.0 / u.s)
== np.arange(0, 6.0, 2.0) * u.m / u.s
)
@pytest.mark.skipif(
not isinstance(getattr(np, "matmul", None), np.ufunc),
reason="np.matmul is not yet a gufunc",
)
def test_matmul(self):
q = np.arange(3.0) * u.m
r = np.matmul(q, q)
assert r == 5.0 * u.m**2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4.0 * u.m, 2.0 * u.s) == function(4.0, 2.0) * u.m / u.s
assert function(4.0 * u.m, 2.0) == function(4.0, 2.0) * u.m
assert function(4.0, 2.0 * u.s) == function(4.0, 2.0) / u.s
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(
function(np.arange(3.0) * u.m, 2.0 * u.s)
== function(np.arange(3.0), 2.0) * u.m / u.s
)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1.0, 2.0, 3.0]) * u.m
divisor = np.array([3.0, 4.0, 5.0]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13.0, 19.0, 23.0])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4.0 * u.m) == 2.0 * u.m**0.5
def test_sqrt_array(self):
assert np.all(
np.sqrt(np.array([1.0, 4.0, 9.0]) * u.m)
== np.array([1.0, 2.0, 3.0]) * u.m**0.5
)
def test_square_scalar(self):
assert np.square(4.0 * u.m) == 16.0 * u.m**2
def test_square_array(self):
assert np.all(
np.square(np.array([1.0, 2.0, 3.0]) * u.m)
== np.array([1.0, 4.0, 9.0]) * u.m**2
)
def test_reciprocal_scalar(self):
assert np.reciprocal(4.0 * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(
np.reciprocal(np.array([1.0, 2.0, 4.0]) * u.m)
== np.array([1.0, 0.5, 0.25]) / u.m
)
def test_heaviside_scalar(self):
assert np.heaviside(0.0 * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert (
np.heaviside(0.0 * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled
)
assert np.heaviside(2.0 * u.J, 0.25) == 1.0 * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1.0, 0.0, 0.0, +1.0])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(
np.heaviside(values * u.m, halfway * u.dimensionless_unscaled)
== [0, 0.25, 0.75, +1.0] * u.dimensionless_unscaled
)
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_scalar(self, function):
assert function(8.0 * u.m**3) == 2.0 * u.m
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4. See #4388.
values = np.array([1.0, 8.0, 64.0])
assert np.all(function(values * u.m**3) == function(values) * u.m)
def test_power_scalar(self):
assert np.power(4.0 * u.m, 2.0) == 16.0 * u.m**2
assert np.power(4.0, 200.0 * u.cm / u.m) == u.Quantity(
16.0, u.dimensionless_unscaled
)
# regression check on #1696
assert np.power(4.0 * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(
np.power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_float_power_array(self):
assert np.all(
np.float_power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.float_power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4.0 * u.m, [2.0, 4.0])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2.0, 4.0] * u.m, [2.0, 4.0])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2.0, 4.0] * u.m / u.m
powers = [2.0, 4.0]
res = np.power(q, powers)
assert np.all(res.value == q.value**powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2.0, 4.0] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2**2
assert np.all(res3.value == q2.value**2)
assert res3.unit == q2.unit**2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError, match="raise something to a dimensionless"):
np.power(3.0, 4.0 * u.m)
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.0) == 3.0 * u.m
assert np.copysign(3 * u.m, 1.0 * u.s) == 3.0 * u.m
assert np.copysign(3 * u.m, -1.0) == -3.0 * u.m
assert np.copysign(3 * u.m, -1.0 * u.s) == -3.0 * u.m
def test_copysign_array(self):
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0 * u.m)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(
np.array([1.0, 2.0, 3.0]) * u.s, np.array([-2.0, 2.0, -4.0]) * u.m
)
== np.array([-1.0, 2.0, -3.0]) * u.s
)
q = np.copysign(np.array([1.0, 2.0, 3.0]), -3 * u.m)
assert np.all(q == np.array([-1.0, -2.0, -3.0]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4.0 * u.m, 2) == 16.0 * u.m
def test_ldexp_array(self):
assert np.all(
np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1])
== np.array([8.0, 8.0, 6.0]) * u.m
)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3.0 * u.m, 4.0)
with pytest.raises(TypeError):
np.ldexp(3.0, u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_scalar(self, function):
q = function(3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(np.array([1.0 / 3.0, 1.0 / 2.0, 1.0])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function "
"to dimensionless quantities"
),
):
function(3.0 * u.m / u.s)
def test_modf_scalar(self):
q = np.modf(9.0 * u.m / (600.0 * u.cm))
assert q == (0.5 * u.dimensionless_unscaled, 1.0 * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.0) * u.m / (500.0 * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3.0 * u.m / (6.0 * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert all(
(_q0, _q1) == np.frexp(_d)
for _q0, _q1, _d in zip(q[0], q[1], [1.0 / 3.0, 1.0 / 2.0, 1.0])
)
def test_frexp_invalid_units(self):
# Can't use frexp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(3.0 * u.m / u.s)
# also does not work on quantities that can be made dimensionless
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm), 1.0)
assert q.unit == u.dimensionless_unscaled
assert_allclose(
q.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0]), 1.0)
)
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.km / u.s, 3.0 * u.m / u.s)
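# Illustrative sketch (not part of the original test suite; the helper name below is
# hypothetical): the math ufuncs exercised above, such as np.exp and np.log, only accept
# quantities that can be converted to dimensionless, and any residual scale factor is
# applied before the function is evaluated.
def _example_dimensionless_only_ufuncs():
    import numpy as np
    import astropy.units as u

    ratio = (200.0 * u.cm) / (1.0 * u.m)  # scaled dimensionless (value 200, unit cm/m)
    res = np.log(ratio)  # the scale is folded in first, so this is log(2)
    assert res.unit == u.dimensionless_unscaled
    assert abs(res.value - np.log(2.0)) < 1e-12
    try:
        np.exp(3.0 * u.m)  # a dimensional quantity cannot be exponentiated
    except TypeError:
        pass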
class TestInvariantUfuncs:
@pytest.mark.parametrize(
"ufunc",
[
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.positive,
],
)
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(
"ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]
)
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
("ufunc", "arbitrary"),
[
(np.add, 0.0),
(np.subtract, 0.0),
(np.hypot, 0.0),
(np.maximum, 0.0),
(np.minimum, 0.0),
(np.nextafter, 0.0),
(np.remainder, np.inf),
(np.mod, np.inf),
(np.fmod, np.inf),
],
)
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
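# Illustrative sketch (hypothetical helper, not from the suite above): for the
# "invariant" two-argument ufuncs such as np.add, the second operand is converted to
# the unit of the first, so the result keeps the first operand's unit.
def _example_invariant_twoarg():
    import numpy as np
    import astropy.units as u

    total = np.add(1.0 * u.m, 200.0 * u.cm)  # 200 cm is converted to 2 m first
    assert total.unit == u.m
    assert abs(total.value - 3.0) < 1e-12
    try:
        np.add(1.0 * u.m, 1.0 * u.s)  # incompatible dimensions
    except u.UnitsError:
        pass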
class TestComparisonUfuncs:
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.0)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(
q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
)
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
for arbitrary_unit_value in (0.0, np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0.0, np.inf, np.nan]))
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
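# Illustrative sketch (hypothetical helper): comparison ufuncs convert the operands to
# a common unit and then return a plain boolean ndarray, not a Quantity, as the class
# above verifies.
def _example_comparison_ufunc():
    import numpy as np
    import astropy.units as u

    result = np.greater([1.0, 2.0, 3.0] * u.m, 150.0 * u.cm)  # 150 cm == 1.5 m
    assert not isinstance(result, u.Quantity)
    assert result.dtype == bool
    assert list(result) == [False, True, True]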
class TestInplaceUfuncs:
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value / 10.0, out=s)
assert check is s
assert np.all(check.value == np.arcsin(value / 10.0))
assert check.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100.0 * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.0
assert check is s
assert np.all(check.value == value / 2.0)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2.0 * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1.0 * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2.0 / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert_allclose(s.value, np.arctan2(1.0, 2.0))
assert s.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.0 * u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.0) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1.0 * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
The first two cases check that float32 is kept; this closes #3976.
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += 20.0 * u.km
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize("ufunc", (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign_inplace(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
def test_ndarray_inplace_op_with_quantity(self):
"""Regression test for gh-13911."""
a = np.arange(3.0)
q = u.Quantity([12.5, 25.0], u.percent)
a[:2] += q # This used to fail
assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
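# Illustrative sketch (hypothetical helper): as exercised in the class above, in-place
# operations keep the dtype of the target when the converted values can be cast under
# the 'same_kind' rule, so float32 quantities stay float32.
def _example_inplace_keeps_dtype():
    import numpy as np
    import astropy.units as u

    q = u.Quantity([1.0, 2.0], u.m, dtype=np.float32)
    q += 300.0 * u.cm  # converted to 3 m, then added in place
    assert q.dtype == np.float32
    assert q.unit is u.m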
@pytest.mark.skipif(
not hasattr(np.core.umath, "clip"), reason="no clip ufunc available"
)
class TestClip:
"""Test the clip ufunc.
In numpy, this is hidden behind a wrapper function that does the backwards-compatibility
checks; here we test the underlying ufunc explicitly. (A standalone sketch follows this
class.)
"""
def setup_method(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1.0, 10.0) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1.0, 10.0) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.0)
expected = self.clip(q, 2.0, 5.0)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1.0, 10.0)
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1.0, 10.0)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled, out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1.0, 10.0) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.0)
with pytest.raises(u.UnitsError):
self.clip(q, 0.0, 1.0)
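# Illustrative sketch (hypothetical helper; assumes np.clip dispatches to the same
# machinery as the raw ufunc tested above): clip bounds given in different but
# compatible units are converted to the unit of the clipped quantity.
def _example_clip_with_units():
    import numpy as np
    import astropy.units as u

    q = np.arange(-1.0, 10.0) * u.m
    clipped = np.clip(q, 125 * u.cm, 0.0055 * u.km)  # i.e. between 1.25 m and 5.5 m
    assert clipped.unit == q.unit
    assert np.all(clipped >= 125 * u.cm) and np.all(clipped <= 0.0055 * u.km)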
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.0) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.0) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.0) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.0) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.0 * u.km)
np.add.at(check, i, 1000.0)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.0 * u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1 * u.s)
# but be fine if it does not
s = np.arange(10.0) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.0 * u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.0) * u.m
np.multiply.at(s, i, 2.0)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.0 * u.km)
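# Illustrative sketch (hypothetical helper): ufunc.at modifies the quantity in place at
# the selected indices, so the operand must be convertible to the target's unit and the
# target's unit must not change.
def _example_ufunc_at():
    import numpy as np
    import astropy.units as u

    q = np.arange(5.0) * u.m
    np.add.at(q, [0, 2], 100.0 * u.cm)  # adds 1 m at indices 0 and 2
    assert q.unit is u.m
    assert abs(q[0].value - 1.0) < 1e-12 and abs(q[2].value - 3.0) < 1e-12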
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.0) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.0) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
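# Illustrative sketch (hypothetical helper): reduce and accumulate work on quantities
# only when the unit of the result equals the unit of the operand, so np.add is fine
# while np.multiply on a dimensional quantity is not.
def _example_ufunc_reduce():
    import numpy as np
    import astropy.units as u

    q = np.arange(4.0) * u.m
    assert np.add.reduce(q) == 6.0 * u.m  # unit preserved
    try:
        np.multiply.reduce(q)  # would change the unit, hence disallowed
    except u.UnitsError:
        pass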
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.0) * u.m
s2 = np.arange(2.0) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.0) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
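# Illustrative sketch (hypothetical helper): ufunc.outer goes through the same unit
# handling as a regular ufunc call, so the result of np.multiply.outer carries the
# product of the input units.
def _example_ufunc_outer():
    import numpy as np
    import astropy.units as u

    s1 = np.arange(3.0) * u.m
    s2 = np.arange(2.0) * u.s
    prod = np.multiply.outer(s1, s2)
    assert prod.shape == (3, 2)
    assert prod.unit == u.m * u.s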
@dataclasses.dataclass
class DuckQuantity1:
data: u.Quantity
@dataclasses.dataclass
class DuckQuantity2(DuckQuantity1):
@property
def unit(self) -> u.UnitBase:
return self.data.unit
@dataclasses.dataclass(eq=False)
class DuckQuantity3(DuckQuantity2):
def __array_ufunc__(self, function, method, *inputs, **kwargs):
inputs = [inp.data if isinstance(inp, type(self)) else inp for inp in inputs]
out = kwargs.get("out", None)
kwargs_copy = {}
for k in kwargs:
kwarg = kwargs[k]
if isinstance(kwarg, type(self)):
kwargs_copy[k] = kwarg.data
elif isinstance(kwarg, (list, tuple)):
kwargs_copy[k] = type(kwarg)(
item.data if isinstance(item, type(self)) else item
for item in kwarg
)
else:
kwargs_copy[k] = kwarg
kwargs = kwargs_copy
for inp in inputs:
if isinstance(inp, np.ndarray):
result = inp.__array_ufunc__(function, method, *inputs, **kwargs)
if result is not NotImplemented:
if out is None:
return type(self)(result)
else:
if function.nout == 1:
return out[0]
else:
return out
return NotImplemented
class DuckQuantity4(DuckQuantity3):
@property
def unit(self):
return DuckQuantity1(1 * self.data.unit)
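# The duck classes above exercise NumPy's dispatch protocol: each operand's
# __array_ufunc__ is tried in turn, and a TypeError is raised only if every one returns
# NotImplemented. A minimal standalone sketch of the same idea (hypothetical class,
# independent of the tests below):
class _ExampleDuck:
    def __init__(self, data):
        self.data = data

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap any _ExampleDuck inputs and delegate to the underlying data; a fuller
        # implementation would return NotImplemented for unsupported cases, as
        # DuckQuantity3 does above.
        unwrapped = [
            inp.data if isinstance(inp, _ExampleDuck) else inp for inp in inputs
        ]
        return _ExampleDuck(getattr(ufunc, method)(*unwrapped, **kwargs))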
class TestUfuncReturnsNotImplemented:
@pytest.mark.parametrize("ufunc", (np.negative, np.abs))
class TestUnaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, duck_quantity):
with pytest.raises(TypeError, match="bad operand type for .*"):
ufunc(duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[
DuckQuantity3(1 * u.mm),
DuckQuantity3([1, 2] * u.mm),
DuckQuantity4(1 * u.mm),
],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(np.empty_like(ufunc(duck_quantity.data)))
out_expected = np.empty_like(ufunc(duck_quantity.data))
result = ufunc(duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
@pytest.mark.parametrize("ufunc", (np.add, np.multiply, np.less))
@pytest.mark.parametrize("quantity", (1 * u.m, [1, 2] * u.m))
class TestBinaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, quantity, duck_quantity):
with pytest.raises(
(TypeError, ValueError),
match=(
r"(Unsupported operand type\(s\) for ufunc .*)|"
r"(unsupported operand type\(s\) for .*)|"
r"(Value not scalar compatible or convertible to an int, float, or complex array)"
),
):
ufunc(quantity, duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[
DuckQuantity3(1 * u.mm),
DuckQuantity3([1, 2] * u.mm),
DuckQuantity4(1 * u.mm),
],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, quantity, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(
np.empty_like(ufunc(quantity, duck_quantity.data))
)
out_expected = np.empty_like(ufunc(quantity, duck_quantity.data))
result = ufunc(quantity, duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(quantity, duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10,
) # fmt: skip
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1.0 * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize("function", (sps.radian,))
def test_radian(self, function):
q1 = function(180.0 * u.degree, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0.0 * u.degree, 30.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q2.value, (30.0 * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0.0 * u.degree, 0.0 * u.arcmin, 30.0 * u.arcsec)
assert_allclose(q3.value, (30.0 * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3.0 * u.radian, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q4.value, 3.0)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3.0 * u.m, 2.0 * u.s, 1.0 * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e,
) # fmt: skip
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2.0 * u.m / (2.0 * u.m), 3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_array(self, function):
q = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m),
)
assert q.unit == u.dimensionless_unscaled
assert np.all(
q.value == function(np.ones(3), np.array([1.0 / 3.0, 1.0 / 2.0, 1.0]))
)
# should also work on quantities that can be made dimensionless
q2 = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm),
)
assert q2.unit == u.dimensionless_unscaled
assert_allclose(
q2.value,
function(np.ones(3), np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])),
)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.kg, 3.0 * u.m / u.s)
|
b2c0e86f3fb2457e8abc50c4675a4dbe9b4a34d9d27764c6c3c6347435a7535f
import os
from urllib.error import HTTPError, URLError
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c
from astropy.coordinates.builtin_frames import TETE
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.funcs import get_sun
from astropy.coordinates.representation import (
CartesianRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.coordinates.solar_system import (
BODY_NAME_TO_KERNEL_SPEC,
_get_apparent_body_position,
get_body,
get_body_barycentric,
get_body_barycentric_posvel,
get_moon,
solar_system_ephemeris,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM, HAS_SKYFIELD
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
if HAS_SKYFIELD:
from skyfield.api import Loader, Topos
de432s_separation_tolerance_planets = 5 * u.arcsec
de432s_distance_tolerance = 20 * u.km
skyfield_angular_separation_tolerance = 1 * u.arcsec
skyfield_separation_tolerance = 10 * u.km
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_SKYFIELD, reason="requires skyfield")
def test_positions_skyfield(tmp_path):
"""
Test positions against those generated by skyfield.
"""
load = Loader(tmp_path)
t = Time("1980-03-25 00:00")
location = None
# skyfield ephemeris
try:
planets = load("de421.bsp")
ts = load.timescale()
except OSError as e:
if os.environ.get("CI", False) and "timed out" in str(e):
pytest.xfail("Timed out in CI")
else:
raise
mercury, jupiter, moon = (
planets["mercury"],
planets["jupiter barycenter"],
planets["moon"],
)
earth = planets["earth"]
skyfield_t = ts.from_astropy(t)
if location is not None:
earth = earth + Topos(
latitude_degrees=location.lat.to_value(u.deg),
longitude_degrees=location.lon.to_value(u.deg),
elevation_m=location.height.to_value(u.m),
)
skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()
skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent()
skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()
if location is not None:
frame = TETE(obstime=t, location=location)
else:
frame = TETE(obstime=t)
ra, dec, dist = skyfield_mercury.radec(epoch="date")
skyfield_mercury = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
ra, dec, dist = skyfield_jupiter.radec(epoch="date")
skyfield_jupiter = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
ra, dec, dist = skyfield_moon.radec(epoch="date")
skyfield_moon = SkyCoord(
ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km), frame=frame
)
# planet positions w.r.t true equator and equinox
moon_astropy = get_body("moon", t, location, ephemeris="de430").transform_to(frame)
mercury_astropy = get_body("mercury", t, location, ephemeris="de430").transform_to(
frame
)
jupiter_astropy = get_body("jupiter", t, location, ephemeris="de430").transform_to(
frame
)
assert (
moon_astropy.separation(skyfield_moon) < skyfield_angular_separation_tolerance
)
assert moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance
assert (
jupiter_astropy.separation(skyfield_jupiter)
< skyfield_angular_separation_tolerance
)
assert (
jupiter_astropy.separation_3d(skyfield_jupiter) < skyfield_separation_tolerance
)
assert (
mercury_astropy.separation(skyfield_mercury)
< skyfield_angular_separation_tolerance
)
assert (
mercury_astropy.separation_3d(skyfield_mercury) < skyfield_separation_tolerance
)
planets.close()
class TestPositionsGeocentric:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup_method(self):
self.t = Time("1980-03-25 00:00")
self.apparent_frame = TETE(obstime=self.t)
# Results returned by JPL Horizons web interface
self.horizons = {
"mercury": SkyCoord(
ra="22h41m47.78s",
dec="-08d29m32.0s",
distance=c * 6.323037 * u.min,
frame=self.apparent_frame,
),
"moon": SkyCoord(
ra="07h32m02.62s",
dec="+18d34m05.0s",
distance=c * 0.021921 * u.min,
frame=self.apparent_frame,
),
"jupiter": SkyCoord(
ra="10h17m12.82s",
dec="+12d02m57.0s",
distance=c * 37.694557 * u.min,
frame=self.apparent_frame,
),
"sun": SkyCoord(
ra="00h16m31.00s",
dec="+01d47m16.9s",
distance=c * 8.294858 * u.min,
frame=self.apparent_frame,
),
}
@pytest.mark.parametrize(
("body", "sep_tol", "dist_tol"),
(
("mercury", 7.0 * u.arcsec, 1000 * u.km),
("jupiter", 78.0 * u.arcsec, 76000 * u.km),
("moon", 20.0 * u.arcsec, 80 * u.km),
("sun", 5.0 * u.arcsec, 11.0 * u.km),
),
)
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
astropy = get_body(body, self.t, ephemeris="builtin")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("body", ("mercury", "jupiter", "sun", "moon"))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris="de432s")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < de432s_separation_tolerance_planets
# Assert distances are close.
assert_quantity_allclose(
astropy.distance, horizons.distance, atol=de432s_distance_tolerance
)
class TestPositionKittPeak:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup_method(self):
kitt_peak = EarthLocation.from_geodetic(
lon=-111.6 * u.deg, lat=31.963333333333342 * u.deg, height=2120 * u.m
)
self.t = Time("2014-09-25T00:00", location=kitt_peak)
self.apparent_frame = TETE(obstime=self.t, location=kitt_peak)
# Results returned by JPL Horizons web interface
self.horizons = {
"mercury": SkyCoord(
ra="13h38m58.50s",
dec="-13d34m42.6s",
distance=c * 7.699020 * u.min,
frame=self.apparent_frame,
),
"moon": SkyCoord(
ra="12h33m12.85s",
dec="-05d17m54.4s",
distance=c * 0.022054 * u.min,
frame=self.apparent_frame,
),
"jupiter": SkyCoord(
ra="09h09m55.55s",
dec="+16d51m57.8s",
distance=c * 49.244937 * u.min,
frame=self.apparent_frame,
),
}
@pytest.mark.parametrize(
("body", "sep_tol", "dist_tol"),
(
("mercury", 7.0 * u.arcsec, 500 * u.km),
("jupiter", 78.0 * u.arcsec, 82000 * u.km),
),
)
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c.
"""
# Add uncertainty in position of Earth
dist_tol = dist_tol + 1300 * u.km
astropy = get_body(body, self.t, ephemeris="builtin")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance, atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("body", ("mercury", "jupiter", "moon"))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris="de432s")
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < de432s_separation_tolerance_planets
# Assert distances are close.
assert_quantity_allclose(
astropy.distance, horizons.distance, atol=de432s_distance_tolerance
)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize("bodyname", ("mercury", "jupiter"))
def test_custom_kernel_spec_body(self, bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
coord_by_name = get_body(bodyname, self.t, ephemeris="de432s")
kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]
coord_by_kspec = get_body(kspec, self.t, ephemeris="de432s")
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_horizons_consistency_with_precision():
"""
A test to compare at high precision against output of JPL horizons.
Tests ephemerides, and conversions from ICRS to GCRS to TETE. We are aiming for
better than 2 milli-arcsecond precision.
We use the Moon since it is nearby and moves fast in the sky, so we are
testing for parallax and for proper handling of light deflection and aberration.
"""
# JPL Horizon values for 2020_04_06 00:00 to 23:00 in 1 hour steps
# JPL Horizons has a known offset (frame bias) of 51.02 mas in RA. We correct that here
ra_apparent_horizons = [
170.167332531,
170.560688674,
170.923834838,
171.271663481,
171.620188972,
171.985340827,
172.381766539,
172.821772139,
173.314502650,
173.865422398,
174.476108551,
175.144332386,
175.864375310,
176.627519827,
177.422655853,
178.236955730,
179.056584831,
179.867427392,
180.655815385,
181.409252074,
182.117113814,
182.771311578,
183.366872837,
183.902395443,
] * u.deg + 51.02376467 * u.mas
dec_apparent_horizons = [
10.269112037,
10.058820647,
9.837152044,
9.603724551,
9.358956528,
9.104012390,
8.840674927,
8.571162442,
8.297917326,
8.023394488,
7.749873882,
7.479312991,
7.213246666,
6.952732614,
6.698336823,
6.450150213,
6.207828142,
5.970645962,
5.737565957,
5.507313851,
5.278462034,
5.049521497,
4.819038911,
4.585696512,
] * u.deg
with solar_system_ephemeris.set("de430"):
loc = EarthLocation.from_geodetic(
-67.787260 * u.deg, -22.959748 * u.deg, 5186 * u.m
)
times = Time("2020-04-06 00:00") + np.arange(0, 24, 1) * u.hour
astropy = get_body("moon", times, loc)
apparent_frame = TETE(obstime=times, location=loc)
astropy = astropy.transform_to(apparent_frame)
usrepr = UnitSphericalRepresentation(
ra_apparent_horizons, dec_apparent_horizons
)
horizons = apparent_frame.realize_frame(usrepr)
assert_quantity_allclose(astropy.separation(horizons), 0 * u.mas, atol=1.5 * u.mas)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"time",
(Time("1960-01-12 00:00"), Time("1980-03-25 00:00"), Time("2010-10-13 00:00")),
)
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body("sun", time, ephemeris="de432s")
builtin_get_sun = get_sun(time)
sep = builtin_get_sun.separation(sun_jpl_gcrs)
assert sep < 0.1 * u.arcsec
def test_get_body_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_body("moon", times, ephemeris="builtin")
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric("earth", Time("2016-03-20T12:30:00"))
ep2, _ = get_body_barycentric_posvel("earth", Time("2016-03-20T12:30:00"))
assert np.all(ep1.xyz == ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel("earth", Time("2016-03-20T12:30:00"))
assert_quantity_allclose(ep.xyz, [-1.0, 0.0, 0.0] * u.AU, atol=0.01 * u.AU)
expected = (
u.Quantity([0.0 * u.one, np.cos(23.5 * u.deg), np.sin(23.5 * u.deg)])
* -30.0
* u.km
/ u.s
)
assert_quantity_allclose(ev.xyz, expected, atol=1.0 * u.km / u.s)
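# Illustrative sketch (hypothetical helper): the expected velocity above is simply the
# ~30 km/s orbital speed of the Earth, directed along -y in ecliptic coordinates near
# the March equinox and rotated into equatorial axes through the ~23.5 deg obliquity.
# E.g. _expected_equinox_velocity(30.0 * u.km / u.s, 23.5 * u.deg) reproduces `expected`.
def _expected_equinox_velocity(speed, obliquity):
    import numpy as np
    import astropy.units as u

    return u.Quantity([0.0 * u.one, np.cos(obliquity), np.sin(obliquity)]) * -speed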
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time("2016-03-20T12:30:00") + np.arange(8.0).reshape(2, 2, 2) * u.yr / 2.0
ep, ev = get_body_barycentric_posvel("earth", t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(
ep.get_xyz(xyz_axis=-1),
[[-1.0, 0.0, 0.0], [+1.0, 0.0, 0.0]] * u.AU,
atol=0.06 * u.AU,
)
expected = u.Quantity([0.0 * u.one, np.cos(23.5 * u.deg), np.sin(23.5 * u.deg)]) * (
[[-30.0], [30.0]] * u.km / u.s
)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected, atol=2.0 * u.km / u.s)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
("body", "pos_tol", "vel_tol"),
(
("mercury", 1000.0 * u.km, 1.0 * u.km / u.s),
("jupiter", 100000.0 * u.km, 2.0 * u.km / u.s),
("earth", 10 * u.km, 10 * u.mm / u.s),
("moon", 18 * u.km, 50 * u.mm / u.s),
),
)
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms), and the Moon
# (which nominally is 6 km rms).
t = Time("2016-03-20T12:30:00")
ep, ev = get_body_barycentric_posvel(body, t, ephemeris="builtin")
dp, dv = get_body_barycentric_posvel(body, t, ephemeris="de432s")
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time("2016-03-20T12:30:00") + np.arange(8.0).reshape(2, 2, 2) * u.yr / 2.0
ep, ev = get_body_barycentric_posvel(body, t, ephemeris="builtin")
dp, dv = get_body_barycentric_posvel(body, t, ephemeris="de432s")
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
@pytest.mark.parametrize(
"time",
(Time("1960-01-12 00:00"), Time("1980-03-25 00:00"), Time("2010-10-13 00:00")),
)
def test_url_or_file_ephemeris(time):
# URL for ephemeris de432s used for testing:
url = "http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp"
# Pass the ephemeris directly as a URL.
coord_by_url = get_body("earth", time, ephemeris=url)
# Translate the URL to the cached location on the filesystem.
# Since we just used the url above, it should already have been downloaded.
filepath = download_file(url, cache=True)
# Get the coordinates using the file path directly:
coord_by_filepath = get_body("earth", time, ephemeris=filepath)
# Using the URL or filepath should give exactly the same results:
assert_quantity_allclose(coord_by_url.ra, coord_by_filepath.ra)
assert_quantity_allclose(coord_by_url.dec, coord_by_filepath.dec)
assert_quantity_allclose(coord_by_url.distance, coord_by_filepath.distance)
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_url_ephemeris_wrong_input():
time = Time("1960-01-12 00:00")
with pytest.raises((HTTPError, URLError)):
# A non-existent URL
get_body(
"earth",
time,
ephemeris=get_pkg_data_filename("path/to/nonexisting/file.bsp"),
)
with pytest.raises(HTTPError):
# A non-existent version of the JPL ephemeris
get_body("earth", time, ephemeris="de001")
with pytest.raises(ValueError):
# An invalid string
get_body("earth", time, ephemeris="not_an_ephemeris")
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_file_ephemeris_wrong_input():
time = Time("1960-01-12 00:00")
# Try loading a non-existing file:
with pytest.raises(ValueError):
get_body("earth", time, ephemeris="/path/to/nonexisting/file.bsp")
# NOTE: This test currently leaves the file open (ResourceWarning).
# Fixing this requires an upstream change in the jplephem package.
# Try loading a file that does exist, but is not an ephemeris file:
with pytest.warns(ResourceWarning), pytest.raises(ValueError):
get_body("earth", time, ephemeris=__file__)
def test_regression_10271():
t = Time(58973.534052125986, format="mjd")
# GCRS position of ALMA at this time
obs_p = CartesianRepresentation(
5724535.74068625, -1311071.58985697, -2492738.93017009, u.m
)
geocentre = CartesianRepresentation(0, 0, 0, u.m)
icrs_sun_from_alma = _get_apparent_body_position("sun", t, "builtin", obs_p)
icrs_sun_from_geocentre = _get_apparent_body_position(
"sun", t, "builtin", geocentre
)
difference = (icrs_sun_from_alma - icrs_sun_from_geocentre).norm()
assert_quantity_allclose(difference, 0.13046941 * u.m, atol=1 * u.mm)
def test_get_moon_deprecation():
time_now = Time.now()
with pytest.warns(
AstropyDeprecationWarning, match=r'Use get_body\("moon"\) instead\.$'
):
moon = get_moon(time_now)
assert moon == get_body("moon", time_now)
|
d9d2def56a3a07206e46918985917771697d6c44f7829d441cc28df3631cb782
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.
"""
import os
import warnings
from importlib import metadata
import erfa
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CIRS,
GCRS,
HCRS,
ICRS,
ITRS,
TEME,
TETE,
AltAz,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
HADec,
HeliocentricMeanEcliptic,
PrecessedGeocentric,
SkyCoord,
SphericalRepresentation,
UnitSphericalRepresentation,
get_sun,
solar_system_ephemeris,
)
from astropy.coordinates.angle_utilities import golden_spiral_grid
from astropy.coordinates.builtin_frames.intermediate_rotation_transforms import (
cirs_to_itrs_mat,
gcrs_to_cirs_mat,
get_location_gcrs,
tete_to_itrs_mat,
)
from astropy.coordinates.builtin_frames.utils import get_jd12
from astropy.coordinates.solar_system import (
_apparent_position_in_true_coordinates,
get_body,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
CI = os.environ.get("CI", False) == "true"
def test_icrs_cirs():
"""
Check a few cases of ICRS<->CIRS for consistency.
Also includes the CIRS<->CIRS transforms at different times, as those go
through ICRS
"""
usph = golden_spiral_grid(200)
dist = np.linspace(0.0, 1, len(usph)) * u.pc
inod = ICRS(usph)
iwd = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
cframe1 = CIRS()
cirsnod = inod.transform_to(cframe1) # uses the default time
# first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS())
assert_allclose(inod.ra, inod2.ra)
assert_allclose(inod.dec, inod2.dec)
# now check that a different time yields different answers
cframe2 = CIRS(obstime=Time("J2005"))
cirsnod2 = inod.transform_to(cframe2)
assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
# now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
assert_allclose(cirsnod.ra, cirsnod3.ra)
assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
assert_allclose(cirsnod.ra, cirsnod5.ra)
assert_allclose(cirsnod.dec, cirsnod5.dec)
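# Illustrative sketch (hypothetical helper, distilled from the round-trip pattern used
# throughout this module): transforming to an intermediate frame and back should
# recover the original coordinates to within numerical precision.
def _example_icrs_cirs_roundtrip():
    import astropy.units as u
    from astropy.coordinates import CIRS, ICRS, SkyCoord

    coord = SkyCoord(ra=10.0 * u.deg, dec=-5.0 * u.deg, frame="icrs")
    roundtripped = coord.transform_to(CIRS()).transform_to(ICRS())
    assert coord.separation(roundtripped) < 1 * u.uas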
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
icrs_coords = [ICRS(usph), ICRS(usph.lon, usph.lat, distance=dist)]
gcrs_frames = [GCRS(), GCRS(obstime=Time("J2005"))]
@pytest.mark.parametrize("icoo", icrs_coords)
def test_icrs_gcrs(icoo):
"""
Check ICRS<->GCRS for consistency
"""
gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time
# first do a round-tripping test
icoo2 = gcrscoo.transform_to(ICRS())
assert_allclose(icoo.distance, icoo2.distance)
assert_allclose(icoo.ra, icoo2.ra)
assert_allclose(icoo.dec, icoo2.dec)
assert isinstance(icoo2.data, icoo.data.__class__)
# now check that a different time yields different answers
gcrscoo2 = icoo.transform_to(gcrs_frames[1])
assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10 * u.deg)
# now check that the gcrs self-transform works as expected
gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op
assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec)
gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different
assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10 * u.deg)
gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same
assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10 * u.deg)
# also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000.0, 0, 0] * u.km, obsgeovel=[1, 0, 0] * u.km / u.s)
gcrscoo6 = icoo.transform_to(gframe3) # should be different
assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10 * u.deg)
icooviag3 = gcrscoo6.transform_to(ICRS()) # and now back to the original
assert_allclose(icoo.ra, icooviag3.ra)
assert_allclose(icoo.dec, icooviag3.dec)
@pytest.mark.parametrize("gframe", gcrs_frames)
def test_icrs_gcrs_dist_diff(gframe):
"""
Check that with and without distance give different ICRS<->GCRS answers
"""
gcrsnod = icrs_coords[0].transform_to(gframe)
gcrswd = icrs_coords[1].transform_to(gframe)
# parallax effects should be included, so with and w/o distance should be different
assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10 * u.deg)
assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10 * u.deg)
# and the distance should transform at least somehow
assert not allclose(
gcrswd.distance, icrs_coords[1].distance, rtol=1e-8, atol=1e-10 * u.pc
)
def test_cirs_to_altaz():
"""
Check the basic CIRS<->AltAz transforms. More thorough checks implicitly
happen in `test_iau_fullstack`
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
altazframe = AltAz(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(altazframe).transform_to(cirs)
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_cirs_to_hadec():
"""
Check the basic CIRS<->HADec transforms.
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(200)
dist = np.linspace(0.5, 1, len(usph)) * u.pc
cirs = CIRS(usph, obstime="J2000")
crepr = SphericalRepresentation(lon=usph.lon, lat=usph.lat, distance=dist)
cirscart = CIRS(
crepr, obstime=cirs.obstime, representation_type=CartesianRepresentation
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
hadecframe = HADec(location=loc, obstime=Time("J2005"))
cirs2 = cirs.transform_to(hadecframe).transform_to(cirs)
cirs3 = cirscart.transform_to(hadecframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_itrs_topo_to_altaz_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
altaz_frame1 = AltAz(obstime="J2000", location=loc)
altaz_frame2 = AltAz(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
altaz1 = icrs.transform_to(altaz_frame1)
# Refraction added
altaz2 = icrs.transform_to(altaz_frame2)
# Refraction removed
cirs = altaz2.transform_to(cirs_frame)
altaz3 = cirs.transform_to(altaz_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
altaz11 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz11.az - altaz1.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.alt - altaz1.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz11.distance - altaz1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = altaz11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
altaz22 = itrs.transform_to(altaz_frame2)
assert_allclose(altaz22.az - altaz2.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.alt - altaz2.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz22.distance - altaz2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = altaz22.transform_to(itrs_frame)
altaz33 = itrs.transform_to(altaz_frame1)
assert_allclose(altaz33.az - altaz3.az, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.alt - altaz3.alt, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(altaz33.distance - altaz3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_itrs_topo_to_hadec_with_refraction():
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
dist = np.linspace(1.0, 1000.0, len(usph)) * u.au
icrs = ICRS(ra=usph.lon, dec=usph.lat, distance=dist)
hadec_frame1 = HADec(obstime="J2000", location=loc)
hadec_frame2 = HADec(
obstime="J2000", location=loc, pressure=1000.0 * u.hPa, relative_humidity=0.5
)
cirs_frame = CIRS(obstime="J2000", location=loc)
itrs_frame = ITRS(location=loc)
# Normal route
# No Refraction
hadec1 = icrs.transform_to(hadec_frame1)
# Refraction added
hadec2 = icrs.transform_to(hadec_frame2)
# Refraction removed
cirs = hadec2.transform_to(cirs_frame)
hadec3 = cirs.transform_to(hadec_frame1)
# Through ITRS
# No Refraction
itrs = icrs.transform_to(itrs_frame)
hadec11 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec11.ha - hadec1.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.dec - hadec1.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec11.distance - hadec1.distance, 0 * u.cm, atol=10.0 * u.cm)
# Round trip
itrs11 = hadec11.transform_to(itrs_frame)
assert_allclose(itrs11.x, itrs.x)
assert_allclose(itrs11.y, itrs.y)
assert_allclose(itrs11.z, itrs.z)
# Refraction added
hadec22 = itrs.transform_to(hadec_frame2)
assert_allclose(hadec22.ha - hadec2.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.dec - hadec2.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec22.distance - hadec2.distance, 0 * u.cm, atol=10.0 * u.cm)
# Refraction removed
itrs = hadec22.transform_to(itrs_frame)
hadec33 = itrs.transform_to(hadec_frame1)
assert_allclose(hadec33.ha - hadec3.ha, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.dec - hadec3.dec, 0 * u.mas, atol=0.1 * u.mas)
assert_allclose(hadec33.distance - hadec3.distance, 0 * u.cm, atol=10.0 * u.cm)
def test_gcrs_itrs():
"""
Check basic GCRS<->ITRS transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(ITRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation_type = CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS()).transform_to(gcrsc)
assert_allclose(gcrsc.spherical.lon, gcrsc2.ra)
assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
"""
Check basic CIRS<->ITRS geocentric transforms for round-tripping.
"""
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000")
cirs6 = CIRS(usph, obstime="J2006")
cirs2 = cirs.transform_to(ITRS()).transform_to(cirs)
cirs6_2 = cirs6.transform_to(ITRS()).transform_to(cirs) # different obstime
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_cirs_itrs_topo():
"""
Check basic CIRS<->ITRS topocentric transforms for round-tripping.
"""
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg, height=0 * u.m)
usph = golden_spiral_grid(200)
cirs = CIRS(usph, obstime="J2000", location=loc)
cirs6 = CIRS(usph, obstime="J2006", location=loc)
cirs2 = cirs.transform_to(ITRS(location=loc)).transform_to(cirs)
# different obstime
cirs6_2 = cirs6.transform_to(ITRS(location=loc)).transform_to(cirs)
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
"""
Check GCRS<->CIRS transforms for round-tripping. More complicated than the
above two because it's multi-hop
"""
usph = golden_spiral_grid(200)
gcrs = GCRS(usph, obstime="J2000")
gcrs6 = GCRS(usph, obstime="J2006")
gcrs2 = gcrs.transform_to(CIRS()).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS()).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
# these should be different:
assert not allclose(gcrs.ra, gcrs6_2.ra, rtol=1e-8)
assert not allclose(gcrs.dec, gcrs6_2.dec, rtol=1e-8)
# now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = (
gcrs.transform_to(ITRS())
.transform_to(CIRS())
.transform_to(ITRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = (
gcrs.transform_to(ICRS())
.transform_to(CIRS())
.transform_to(ICRS())
.transform_to(gcrs)
)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
"""
Check GCRS<->AltAz transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000")[None] # broadcast with times below
    # check with an array of times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
aaframe = AltAz(obstime=times, location=loc)
aa1 = gcrs.transform_to(aaframe)
aa2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(aaframe)
aa3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(aaframe)
# make sure they're all consistent
assert_allclose(aa1.alt, aa2.alt)
assert_allclose(aa1.az, aa2.az)
assert_allclose(aa1.alt, aa3.alt)
assert_allclose(aa1.az, aa3.az)
def test_gcrs_hadec():
"""
Check GCRS<->HADec transforms for round-tripping. Has multiple paths
"""
from astropy.coordinates import EarthLocation
usph = golden_spiral_grid(128)
gcrs = GCRS(usph, obstime="J2000") # broadcast with times below
    # check with an array of times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day, format="jd")[:, None]
loc = EarthLocation(lon=10 * u.deg, lat=80.0 * u.deg)
hdframe = HADec(obstime=times, location=loc)
hd1 = gcrs.transform_to(hdframe)
hd2 = gcrs.transform_to(ICRS()).transform_to(CIRS()).transform_to(hdframe)
hd3 = gcrs.transform_to(ITRS()).transform_to(CIRS()).transform_to(hdframe)
# make sure they're all consistent
assert_allclose(hd1.dec, hd2.dec)
assert_allclose(hd1.ha, hd2.ha)
assert_allclose(hd1.dec, hd3.dec)
assert_allclose(hd1.ha, hd3.ha)
def test_precessed_geocentric():
assert PrecessedGeocentric().equinox.jd == Time("J2000").jd
gcrs_coo = GCRS(180 * u.deg, 2 * u.deg, distance=10000 * u.km)
pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric())
assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10 * u.marcsec
assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10 * u.marcsec
assert_allclose(gcrs_coo.distance, pgeo_coo.distance)
gcrs_roundtrip = pgeo_coo.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance)
pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox="B1850"))
assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5 * u.deg
assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5 * u.deg
assert_allclose(gcrs_coo.distance, pgeo_coo2.distance)
gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS())
assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance)
def test_precessed_geocentric_different_obstime():
# Create two PrecessedGeocentric frames with different obstime
precessedgeo1 = PrecessedGeocentric(obstime="2021-09-07")
precessedgeo2 = PrecessedGeocentric(obstime="2021-06-07")
# GCRS->PrecessedGeocentric should give different results for the two frames
gcrs_coord = GCRS(10 * u.deg, 20 * u.deg, 3 * u.AU, obstime=precessedgeo1.obstime)
pg_coord1 = gcrs_coord.transform_to(precessedgeo1)
pg_coord2 = gcrs_coord.transform_to(precessedgeo2)
assert not pg_coord1.is_equivalent_frame(pg_coord2)
assert not allclose(pg_coord1.cartesian.xyz, pg_coord2.cartesian.xyz)
# Looping back to GCRS should return the original coordinate
loopback1 = pg_coord1.transform_to(gcrs_coord)
loopback2 = pg_coord2.transform_to(gcrs_coord)
assert loopback1.is_equivalent_frame(gcrs_coord)
assert loopback2.is_equivalent_frame(gcrs_coord)
assert_allclose(loopback1.cartesian.xyz, gcrs_coord.cartesian.xyz)
assert_allclose(loopback2.cartesian.xyz, gcrs_coord.cartesian.xyz)
# shared by parametrized tests below. Some use the whole AltAz, others use just obstime
totest_frames = [
# J2000 is often a default so this might work when others don't
AltAz(location=EarthLocation(-90 * u.deg, 65 * u.deg), obstime=Time("J2000")),
AltAz(location=EarthLocation(120 * u.deg, -35 * u.deg), obstime=Time("J2000")),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
AltAz(
location=EarthLocation(-90 * u.deg, 65 * u.deg),
obstime=Time("2014-08-01 08:00:00"),
),
AltAz(
location=EarthLocation(120 * u.deg, -35 * u.deg),
obstime=Time("2014-01-01 00:00:00"),
),
]
MOONDIST = 385000 * u.km  # approximate semi-major axis of the Moon's orbit
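# MOONDIST_CART below places the test point at a distance of MOONDIST along the
# (1, 1, 1) direction; the 3**-0.5 factor on each component keeps the total
# length equal to MOONDIST.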
MOONDIST_CART = CartesianRepresentation(
3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST
)
# roughly earth orbital eccentricity, but with an added tolerance
EARTHECC = 0.017 + 0.005
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_sunish(testframe):
"""
Sanity-check that the sun is at a reasonable distance from any altaz
"""
sun = get_sun(testframe.obstime)
assert sun.frame.name == "gcrs"
# the .to(u.au) is not necessary, it just makes the asserts on failure more readable
    assert (1 - EARTHECC) * u.au < sun.distance.to(u.au) < (1 + EARTHECC) * u.au
    sunaa = sun.transform_to(testframe)
    assert (1 - EARTHECC) * u.au < sunaa.distance.to(u.au) < (1 + EARTHECC) * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a GCRS->AltAz transformation
"""
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
# now check that the distance change is similar to earth radius
    assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
# also should add checks that the alt/az are different for different earth locations
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
"""
Repeat of both the moonish and sunish tests above to make sure the two
routes through the coordinate graph are consistent with each other
"""
sun = get_sun(testframe.obstime)
sunaa_viaicrs = sun.transform_to(ICRS()).transform_to(testframe)
sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa_viaicrs = moon.transform_to(ICRS()).transform_to(testframe)
moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(
testframe
)
assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a CIRS<->AltAz transformation
"""
moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
assert 1000 * u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000 * u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_altaz_nodist(testframe):
"""
Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation.
"""
coo0 = CIRS(
UnitSphericalRepresentation(10 * u.deg, 20 * u.deg), obstime=testframe.obstime
)
# check that it round-trips
coo1 = coo0.transform_to(testframe).transform_to(coo0)
assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz)
@pytest.mark.parametrize("testframe", totest_frames)
def test_cirs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from CIRS
"""
moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_gcrs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from GCRS
"""
moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS())
assert 0.97 * u.au < moonicrs.distance < 1.03 * u.au
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_gcrscirs_sunish(testframe):
"""
check that the ICRS barycenter goes to about the right distance from various
~geocentric frames (other than testframe)
"""
# slight offset to avoid divide-by-zero errors
icrs = ICRS(0 * u.deg, 0 * u.deg, distance=10 * u.km)
gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime))
    assert (1 - EARTHECC) * u.au < gcrs.distance.to(u.au) < (1 + EARTHECC) * u.au
    cirs = icrs.transform_to(CIRS(obstime=testframe.obstime))
    assert (1 - EARTHECC) * u.au < cirs.distance.to(u.au) < (1 + EARTHECC) * u.au
    itrs = icrs.transform_to(ITRS(obstime=testframe.obstime))
    assert (
        (1 - EARTHECC) * u.au < itrs.spherical.distance.to(u.au) < (1 + EARTHECC) * u.au
    )
@pytest.mark.parametrize("testframe", totest_frames)
def test_icrs_altaz_moonish(testframe):
"""
Check that something expressed in *ICRS* as being moon-like goes to the
right AltAz distance
"""
# we use epv00 instead of get_sun because get_sun includes aberration
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(testframe.obstime, "tdb"))
earth_icrs_xyz = earth_pv_bary[0] * u.au
moonoffset = [0, 0, MOONDIST.value] * MOONDIST.unit
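    # offset the Earth's barycentric ICRS position by MOONDIST along +z to build
    # a moon-like object in ICRS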
moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset))
moonaa = moonish_icrs.transform_to(testframe)
# now check that the distance change is similar to earth radius
    assert 1000 * u.km < np.abs(moonaa.distance - MOONDIST).to(u.km) < 7000 * u.km
def test_gcrs_self_transform_closeby():
"""
Tests GCRS self transform for objects which are nearby and thus
have reasonable parallax.
Moon positions were originally created using JPL DE432s ephemeris.
The two lunar positions (one geocentric, one at a defined location)
are created via a transformation from ICRS to two different GCRS frames.
We test that the GCRS-GCRS self transform can correctly map one GCRS
frame onto the other.
"""
t = Time("2014-12-25T07:00")
moon_geocentric = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
# this is the location of the Moon as seen from La Palma
obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216] * u.m
obsgeovel = [4.59798494, -407.84677071, 0.0] * u.m / u.s
moon_lapalma = SkyCoord(
GCRS(
318.7048445 * u.deg,
-11.98761996 * u.deg,
369722.8231031 * u.km,
obstime=t,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel,
)
)
transformed = moon_geocentric.transform_to(moon_lapalma.frame)
delta = transformed.separation_3d(moon_lapalma)
assert_allclose(delta, 0.0 * u.m, atol=1 * u.m)
def test_teme_itrf():
"""
    Test case: transform from TEME to ITRF.
    The test case derives from the example in Appendix C of Vallado, Crawford, Hujsak & Kelso (2006).
See https://celestrak.com/publications/AIAA/2006-6753/AIAA-2006-6753-Rev2.pdf
"""
v_itrf = CartesianDifferential(
-3.225636520, -2.872451450, 5.531924446, unit=u.km / u.s
)
p_itrf = CartesianRepresentation(
-1033.479383,
7901.2952740,
6380.35659580,
unit=u.km,
differentials={"s": v_itrf},
)
t = Time("2004-04-06T07:51:28.386")
teme = ITRS(p_itrf, obstime=t).transform_to(TEME(obstime=t))
v_teme = CartesianDifferential(
-4.746131487, 0.785818041, 5.531931288, unit=u.km / u.s
)
p_teme = CartesianRepresentation(
5094.18016210,
6127.64465050,
6380.34453270,
unit=u.km,
differentials={"s": v_teme},
)
assert_allclose(
teme.cartesian.without_differentials().xyz,
p_teme.without_differentials().xyz,
atol=30 * u.cm,
)
assert_allclose(
teme.cartesian.differentials["s"].d_xyz,
p_teme.differentials["s"].d_xyz,
atol=1.0 * u.cm / u.s,
)
# test round trip
itrf = teme.transform_to(ITRS(obstime=t))
assert_allclose(
itrf.cartesian.without_differentials().xyz,
p_itrf.without_differentials().xyz,
atol=100 * u.cm,
)
assert_allclose(
itrf.cartesian.differentials["s"].d_xyz,
p_itrf.differentials["s"].d_xyz,
atol=1 * u.cm / u.s,
)
def test_precessedgeocentric_loopback():
from_coo = PrecessedGeocentric(
1 * u.deg, 2 * u.deg, 3 * u.AU, obstime="2001-01-01", equinox="2001-01-01"
)
# Change just the obstime
to_frame = PrecessedGeocentric(obstime="2001-06-30", equinox="2001-01-01")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert not allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
# Change just the equinox
to_frame = PrecessedGeocentric(obstime="2001-01-01", equinox="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the direction but not the distance
assert not allclose(explicit_coo.ra, from_coo.ra, rtol=1e-10)
assert not allclose(explicit_coo.dec, from_coo.dec, rtol=1e-10)
assert allclose(explicit_coo.distance, from_coo.distance, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.ra, implicit_coo.ra, rtol=1e-10)
assert_allclose(explicit_coo.dec, implicit_coo.dec, rtol=1e-10)
assert_allclose(explicit_coo.distance, implicit_coo.distance, rtol=1e-10)
def test_teme_loopback():
from_coo = TEME(1 * u.AU, 2 * u.AU, 3 * u.AU, obstime="2001-01-01")
to_frame = TEME(obstime="2001-06-30")
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the coordinate
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert_allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
@pytest.mark.remote_data
def test_earth_orientation_table(monkeypatch):
"""Check that we can set the IERS table used as Earth Reference.
Use the here and now to be sure we get a difference.
"""
monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
t = Time.now()
location = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
altaz = AltAz(location=location, obstime=t)
sc = SkyCoord(1 * u.deg, 2 * u.deg)
# Default: uses IERS_Auto, which will give a prediction.
# Note: tests run with warnings turned into errors, so it is
# meaningful if this passes.
if CI:
with warnings.catch_warnings():
# Server occasionally blocks IERS download in CI.
warnings.filterwarnings("ignore", message=r".*using local IERS-B.*")
# This also captures unclosed socket warning that is ignored in setup.cfg
warnings.filterwarnings("ignore", message=r".*unclosed.*")
altaz_auto = sc.transform_to(altaz)
else:
altaz_auto = sc.transform_to(altaz) # No warnings
with iers.earth_orientation_table.set(iers.IERS_B.open()):
with pytest.warns(AstropyWarning, match="after IERS data"):
altaz_b = sc.transform_to(altaz)
sep_b_auto = altaz_b.separation(altaz_auto)
assert_allclose(sep_b_auto, 0.0 * u.deg, atol=1 * u.arcsec)
assert sep_b_auto > 10 * u.microarcsecond
# Check we returned to regular IERS system.
altaz_auto2 = sc.transform_to(altaz)
assert altaz_auto2.separation(altaz_auto) == 0.0
@pytest.mark.remote_data
@pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
def test_ephemerides():
"""
We test that using different ephemerides gives very similar results
for transformations
"""
t = Time("2014-12-25T07:00")
moon = SkyCoord(
GCRS(
318.10579159 * u.deg,
-11.65281165 * u.deg,
365042.64880308 * u.km,
obstime=t,
)
)
icrs_frame = ICRS()
hcrs_frame = HCRS(obstime=t)
ecl_frame = HeliocentricMeanEcliptic(equinox=t)
cirs_frame = CIRS(obstime=t)
moon_icrs_builtin = moon.transform_to(icrs_frame)
moon_hcrs_builtin = moon.transform_to(hcrs_frame)
moon_helioecl_builtin = moon.transform_to(ecl_frame)
moon_cirs_builtin = moon.transform_to(cirs_frame)
with solar_system_ephemeris.set("jpl"):
moon_icrs_jpl = moon.transform_to(icrs_frame)
moon_hcrs_jpl = moon.transform_to(hcrs_frame)
moon_helioecl_jpl = moon.transform_to(ecl_frame)
moon_cirs_jpl = moon.transform_to(cirs_frame)
# most transformations should differ by an amount which is
# non-zero but of order milliarcsecs
sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl)
sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl)
sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl)
sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl)
assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0 * u.deg, atol=10 * u.mas)
assert all(
sep > 10 * u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl)
)
# CIRS should be the same
assert_allclose(sep_cirs, 0.0 * u.deg, atol=1 * u.microarcsecond)
def test_tete_transforms():
"""
We test the TETE transforms for proper behaviour here.
The TETE transforms are tested for accuracy against JPL Horizons in
test_solar_system.py. Here we are looking to check for consistency and
errors in the self transform.
"""
loc = EarthLocation.from_geodetic("-22°57'35.1", "-67°47'14.1", 5186 * u.m)
time = Time("2020-04-06T00:00")
p, v = loc.get_gcrs_posvel(time)
gcrs_frame = GCRS(obstime=time, obsgeoloc=p, obsgeovel=v)
moon = SkyCoord(
169.24113968 * u.deg,
10.86086666 * u.deg,
358549.25381755 * u.km,
frame=gcrs_frame,
)
tete_frame = TETE(obstime=time, location=loc)
    # need to set obsgeoloc/vel explicitly or SkyCoord behaviour overwrites them
tete_geo = TETE(obstime=time, location=EarthLocation(*([0, 0, 0] * u.km)))
# test self-transform by comparing to GCRS-TETE-ITRS-TETE route
tete_coo1 = moon.transform_to(tete_frame)
tete_coo2 = moon.transform_to(tete_geo)
assert_allclose(tete_coo1.separation_3d(tete_coo2), 0 * u.mm, atol=1 * u.mm)
# test TETE-ITRS transform by comparing GCRS-CIRS-ITRS to GCRS-TETE-ITRS
itrs1 = moon.transform_to(CIRS()).transform_to(ITRS())
itrs2 = moon.transform_to(TETE()).transform_to(ITRS())
assert_allclose(itrs1.separation_3d(itrs2), 0 * u.mm, atol=1 * u.mm)
# test round trip GCRS->TETE->GCRS
new_moon = moon.transform_to(TETE()).transform_to(moon)
assert_allclose(new_moon.separation_3d(moon), 0 * u.mm, atol=1 * u.mm)
# test round trip via ITRS
tete_rt = tete_coo1.transform_to(ITRS(obstime=time)).transform_to(tete_coo1)
assert_allclose(tete_rt.separation_3d(tete_coo1), 0 * u.mm, atol=1 * u.mm)
    # ensure the deprecated routine remains consistent
    # and make sure it raises a deprecation warning
with pytest.warns(AstropyDeprecationWarning, match="The use of"):
tete_alt = _apparent_position_in_true_coordinates(moon)
assert_allclose(tete_coo1.separation_3d(tete_alt), 0 * u.mm, atol=100 * u.mm)
def test_straight_overhead():
"""
With a precise CIRS<->Observed transformation this should give Alt=90 exactly
If the CIRS self-transform breaks it won't, due to improper treatment of aberration
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER.
# Note, this won't be overhead for a topocentric observer because of
# aberration.
cirs_geo = obj.get_itrs(t).transform_to(CIRS(obstime=t))
# now get the Geocentric CIRS position of observatory
obsrepr = home.get_itrs(t).transform_to(CIRS(obstime=t)).cartesian
# topocentric CIRS position of a straight overhead object
cirs_repr = cirs_geo.cartesian - obsrepr
# create a CIRS object that appears straight overhead for a TOPOCENTRIC OBSERVER
topocentric_cirs_frame = CIRS(obstime=t, location=home)
cirs_topo = topocentric_cirs_frame.realize_frame(cirs_repr)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = cirs_topo.transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = cirs_topo.transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def test_itrs_straight_overhead():
"""
With a precise ITRS<->Observed transformation this should give Alt=90 exactly
"""
t = Time("J2010")
obj = EarthLocation(-1 * u.deg, 52 * u.deg, height=10.0 * u.km)
home = EarthLocation(-1 * u.deg, 52 * u.deg, height=0.0 * u.km)
# An object that appears straight overhead - FOR A GEOCENTRIC OBSERVER.
itrs_geo = obj.get_itrs(t).cartesian
# now get the Geocentric ITRS position of observatory
obsrepr = home.get_itrs(t).cartesian
# topocentric ITRS position of a straight overhead object
itrs_repr = itrs_geo - obsrepr
# create a ITRS object that appears straight overhead for a TOPOCENTRIC OBSERVER
itrs_topo = ITRS(itrs_repr, obstime=t, location=home)
# Check AltAz (though Azimuth can be anything so is not tested).
aa = itrs_topo.transform_to(AltAz(obstime=t, location=home))
assert_allclose(aa.alt, 90 * u.deg, atol=1 * u.uas, rtol=0)
# Check HADec.
hd = itrs_topo.transform_to(HADec(obstime=t, location=home))
assert_allclose(hd.ha, 0 * u.hourangle, atol=1 * u.uas, rtol=0)
assert_allclose(hd.dec, 52 * u.deg, atol=1 * u.uas, rtol=0)
def jplephem_ge(minversion):
"""Check if jplephem is installed and has version >= minversion."""
# This is a separate routine since somehow with pyinstaller the stanza
# not HAS_JPLEPHEM or metadata.version('jplephem') < '2.15'
# leads to a module not found error.
try:
return HAS_JPLEPHEM and metadata.version("jplephem") >= minversion
except Exception:
return False
@pytest.mark.remote_data
@pytest.mark.skipif(not jplephem_ge("2.15"), reason="requires jplephem >= 2.15")
def test_aa_hd_high_precision():
"""These tests are provided by @mkbrewer - see issue #10356.
    The code that produces them agrees very well (<0.5 mas) with Skyfield once polar
    motion is turned off, but Skyfield does not include polar motion, so a comparison
    to Skyfield or JPL Horizons will be ~1 arcsec off.
    The absence of polar motion within Skyfield and the disagreement between Skyfield
    and Horizons make high-precision comparisons to those codes difficult.
Updated 2020-11-29, after the comparison between codes became even better,
down to 100 nas.
Updated 2023-02-14, after IERS changes the IERS B format and analysis,
causing small deviations.
NOTE: the agreement reflects consistency in approach between two codes,
not necessarily absolute precision. If this test starts failing, the
tolerance can and should be weakened *if* it is clear that the change is
due to an improvement (e.g., a new IAU precession model).
"""
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
# Note: at this level of precision for the comparison, we have to include
# the location in the time, as it influences the transformation to TDB.
t = Time("2017-04-06T00:00:00.0", location=loc)
with solar_system_ephemeris.set("de430"):
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
moon_hd = moon.transform_to(HADec(obstime=t, location=loc))
# Numbers from
# https://github.com/astropy/astropy/pull/11073#issuecomment-735486271
# updated in https://github.com/astropy/astropy/issues/11683
# and again after the IERS_B change.
TARGET_AZ, TARGET_EL = 15.032673662647138 * u.deg, 50.303110087520054 * u.deg
TARGET_DISTANCE = 376252.88325051306 * u.km
assert_allclose(moon_aa.az, TARGET_AZ, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.alt, TARGET_EL, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_aa.distance, TARGET_DISTANCE, atol=0.1 * u.mm, rtol=0)
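    # convert the AltAz result to (hour angle, declination) with erfa.ae2hd so it
    # can be compared against the direct HADec transform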
ha, dec = erfa.ae2hd(
moon_aa.az.to_value(u.radian),
moon_aa.alt.to_value(u.radian),
lat.to_value(u.radian),
)
ha = u.Quantity(ha, u.radian, copy=False)
dec = u.Quantity(dec, u.radian, copy=False)
assert_allclose(moon_hd.ha, ha, atol=0.1 * u.uas, rtol=0)
assert_allclose(moon_hd.dec, dec, atol=0.1 * u.uas, rtol=0)
def test_aa_high_precision_nodata():
"""
These tests are designed to ensure high precision alt-az transforms.
They are a slight fudge since the target values come from astropy itself. They are generated
    with a version of the code that passes the tests above, but using the internal
    solar-system ephemerides to avoid the use of remote data.
"""
# Last updated when the new IERS B format and analysis was introduced.
TARGET_AZ, TARGET_EL = 15.0323151 * u.deg, 50.30271925 * u.deg
lat = -22.959748 * u.deg
lon = -67.787260 * u.deg
elev = 5186 * u.m
loc = EarthLocation.from_geodetic(lon, lat, elev)
t = Time("2017-04-06T00:00:00.0")
moon = get_body("moon", t, loc)
moon_aa = moon.transform_to(AltAz(obstime=t, location=loc))
assert_allclose(moon_aa.az - TARGET_AZ, 0 * u.mas, atol=0.5 * u.mas)
assert_allclose(moon_aa.alt - TARGET_EL, 0 * u.mas, atol=0.5 * u.mas)
class TestGetLocationGCRS:
# TETE and CIRS use get_location_gcrs to get obsgeoloc and obsgeovel
# with knowledge of some of the matrices. Check that this is consistent
# with a direct transformation.
def setup_class(cls):
cls.loc = loc = EarthLocation.from_geodetic(
np.linspace(0, 360, 6) * u.deg, np.linspace(-90, 90, 6) * u.deg, 100 * u.m
)
cls.obstime = obstime = Time(np.linspace(2000, 2010, 6), format="jyear")
# Get comparison via a full transformation. We do not use any methods
# of EarthLocation, since those depend on the fast transform.
loc_itrs = ITRS(loc.x, loc.y, loc.z, obstime=obstime)
zeros = np.broadcast_to(0.0 * (u.km / u.s), (3,) + loc_itrs.shape, subok=True)
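        # attach zero ITRS velocities so the ITRS->GCRS transform below also yields
        # the velocity of the location (obsgeovel), not just its position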
loc_itrs.data.differentials["s"] = CartesianDifferential(zeros)
loc_gcrs_cart = loc_itrs.transform_to(GCRS(obstime=obstime)).cartesian
cls.obsgeoloc = loc_gcrs_cart.without_differentials()
cls.obsgeovel = loc_gcrs_cart.differentials["s"].to_cartesian()
def check_obsgeo(self, obsgeoloc, obsgeovel):
assert_allclose(obsgeoloc.xyz, self.obsgeoloc.xyz, atol=0.1 * u.um, rtol=0.0)
assert_allclose(
obsgeovel.xyz, self.obsgeovel.xyz, atol=0.1 * u.mm / u.s, rtol=0.0
)
def test_get_gcrs_posvel(self):
# Really just a sanity check
self.check_obsgeo(*self.loc.get_gcrs_posvel(self.obstime))
def test_tete_quick(self):
# Following copied from intermediate_rotation_transforms.gcrs_to_tete
rbpn = erfa.pnm06a(*get_jd12(self.obstime, "tt"))
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, tete_to_itrs_mat(self.obstime, rbpn=rbpn), rbpn
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
def test_cirs_quick(self):
cirs_frame = CIRS(location=self.loc, obstime=self.obstime)
# Following copied from intermediate_rotation_transforms.gcrs_to_cirs
pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
loc_gcrs_frame = get_location_gcrs(
self.loc, self.obstime, cirs_to_itrs_mat(cirs_frame.obstime), pmat
)
self.check_obsgeo(loc_gcrs_frame.obsgeoloc, loc_gcrs_frame.obsgeovel)
|
d26bb9167d1c38a4be4983e26984c16018d5545d71af419fadc37fd8a61b1b9f | import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, Latitude, Longitude
from astropy.coordinates.sites import (
SiteRegistry,
get_builtin_sites,
get_downloaded_sites,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
def test_builtin_sites():
reg = get_builtin_sites()
greenwich = reg["greenwich"]
lon, lat, el = greenwich.to_geodetic()
assert_quantity_allclose(lon, Longitude("0:0:0", unit=u.deg), atol=10 * u.arcsec)
assert_quantity_allclose(lat, Latitude("51:28:40", unit=u.deg), atol=1 * u.arcsec)
assert_quantity_allclose(el, 46 * u.m, atol=1 * u.m)
names = reg.names
assert "greenwich" in names
assert "example_site" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use the 'names' attribute to see",
):
reg["nonexistent"]
@pytest.mark.remote_data(source="astropy")
def test_online_sites():
reg = get_downloaded_sites()
keck = reg["keck"]
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(
lon, -Longitude("155:28.7", unit=u.deg), atol=0.001 * u.deg
)
assert_quantity_allclose(lat, Latitude("19:49.7", unit=u.deg), atol=0.001 * u.deg)
assert_quantity_allclose(el, 4160 * u.m, atol=1 * u.m)
names = reg.names
assert "keck" in names
assert "ctio" in names
# The JSON file contains `name` and `aliases` for each site, and astropy
# should use names from both, but not empty strings [#12721].
assert "" not in names
assert "Royal Observatory Greenwich" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use the 'names' attribute to see",
):
reg["nonexistent"]
with pytest.raises(
KeyError,
match="Site 'kec' not in database. Use the 'names' attribute to see available",
):
reg["kec"]
@pytest.mark.remote_data(source="astropy")
# this will *try* the online registry, so we have to mark it remote_data, even
# though it could fall back on the non-remote version
def test_EarthLocation_basic():
greenwichel = EarthLocation.of_site("greenwich")
lon, lat, el = greenwichel.to_geodetic()
assert_quantity_allclose(lon, Longitude("0:0:0", unit=u.deg), atol=10 * u.arcsec)
assert_quantity_allclose(lat, Latitude("51:28:40", unit=u.deg), atol=1 * u.arcsec)
assert_quantity_allclose(el, 46 * u.m, atol=1 * u.m)
names = EarthLocation.get_site_names()
assert "greenwich" in names
assert "example_site" in names
with pytest.raises(
KeyError,
match="Site 'nonexistent' not in database. Use EarthLocation.get_site_names",
):
EarthLocation.of_site("nonexistent")
@pytest.mark.parametrize(
"class_method,args",
[(EarthLocation.get_site_names, []), (EarthLocation.of_site, ["greenwich"])],
)
def test_Earthlocation_refresh_cache_is_mandatory_kwarg(class_method, args):
with pytest.raises(
TypeError,
match=(
rf".*{class_method.__name__}\(\) takes [12] positional "
"arguments? but [23] were given$"
),
):
class_method(*args, False)
@pytest.mark.parametrize(
"class_method,args",
[(EarthLocation.get_site_names, []), (EarthLocation.of_site, ["greenwich"])],
)
@pytest.mark.parametrize("refresh_cache", [False, True])
def test_Earthlocation_refresh_cache(class_method, args, refresh_cache, monkeypatch):
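    # patch the registry lookup so we can verify that refresh_cache is forwarded
    # to _get_site_registry as force_download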
def get_site_registry_monkeypatched(force_download, force_builtin=False):
assert force_download is refresh_cache
return get_builtin_sites()
monkeypatch.setattr(
EarthLocation, "_get_site_registry", get_site_registry_monkeypatched
)
class_method(*args, refresh_cache=refresh_cache)
def test_EarthLocation_state_offline():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_builtin=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
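    # without any force flag the cached registry should be returned unchanged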
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_builtin=True)
assert oldreg is not newreg
@pytest.mark.remote_data(source="astropy")
def test_EarthLocation_state_online():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_download=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_download=True)
assert oldreg is not newreg
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
names = ["sitea", "site A"]
loc = EarthLocation.from_geodetic(lat=1 * u.deg, lon=2 * u.deg, height=3 * u.km)
reg.add_site(names, loc)
assert len(reg.names) == 2
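    # site lookups are case-insensitive, so mixed-case keys resolve to the same object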
loc1 = reg["SIteA"]
assert loc1 is loc
loc2 = reg["sIte a"]
assert loc2 is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
    # This keeps us from needing to use remote_data
    # note that this does *not* mess up the registry for EarthLocation because
    # the registry is cached on a per-class basis
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site("greenwich")
assert type(el2) is EarthLocation2
assert el2.info.name == "Royal Observatory Greenwich"
def check_builtin_matches_remote(download_url=True):
"""
This function checks that the builtin sites registry is consistent with the
remote registry (or a registry at some other location).
    Note that currently this is *not* run by the testing suite (because it
    doesn't start with "test"); it is instead meant to be used as a check
    before merging changes in astropy-data.
"""
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(
builtin_registry[name].geocentric, dl_registry[name].geocentric
)
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(" ", name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(
" ",
name,
"builtin:",
builtin_registry[name],
"download:",
dl_registry[name],
)
assert False, (
"Builtin and download registry aren't consistent - failures printed to"
" stdout"
)
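# A minimal sketch of how the consistency check above might be invoked by hand
# (assuming network access to the site registry download location); it is not
# collected by pytest:
#
#     check_builtin_matches_remote()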
def test_meta_present():
reg = get_builtin_sites()
greenwich = reg["greenwich"]
assert (
greenwich.info.meta["source"]
== "Ordnance Survey via http://gpsinformation.net/main/greenwich.htm and UNESCO"
)
|
c008ea802ff270187a11fef0f7572129e3a2c56c7444d3d734677079e62f5c35 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import warnings
from io import BytesIO, StringIO
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([("a", 1), ("b", 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header["c"] = 100
assert "c" not in copied_header
# and changing the copy should not change the original.
copied_header["a"] = 0
assert original_header["a"] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([("a", 10)])
new_header = fits.Header(original_header, copy=True)
original_header["a"] = 20
assert new_header["a"] == 10
new_header["a"] = 0
assert original_header["a"] == 20
def test_init_with_dict():
dict1 = {"a": 11, "b": 12, "c": 13, "d": 14, "e": 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
    # Create a list of tuples, each consisting of a letter and its index
list1 = [(i, j) for j, i in enumerate("abcdefghijklmnopqrstuvwxyz")]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
    # Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
header.rename_keyword("A", "B")
assert "A" not in header
assert "B" in header
assert header[0] == "B"
assert header["B"] == "B"
assert header.comments["B"] == "C"
@pytest.mark.parametrize("key", ["A", "a"])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
assert key in header
assert header[key] == "B"
assert header.get(key) == "B"
assert header.index(key) == 0
assert header.comments[key] == "C"
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert c.keyword == ""
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == "ABC"
assert c.value == "abc"
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card("abc", "<8 ch")
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card("nullstr", "")
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring("ABC = F")
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card("long_int", -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card("floatnum", -467374636747637647347374734737437.0)
if str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad(
"FLOATNUM= -4.6737463674763E+032"
):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card("abc", (1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card("abc", 9, "abcde" * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (
str(c) == "ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab"
)
c = fits.Card("abc", "a" * 68, "abcdefg")
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ("abc",), {"value": (2, 3)})
pytest.raises(ValueError, fits.Card, "key", [], "comment")
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, "abcdefghi", "long")
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card("abc+", 9)
assert len(w) == 1
assert c.image == _pad("HIERARCH abc+ = 9")
def test_add_history(self):
header = fits.Header(
[
("A", "B", "C"),
("HISTORY", 1),
("HISTORY", 2),
("HISTORY", 3),
("", "", ""),
("", "", ""),
]
)
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header["HISTORY"] == [1, 2, 3, 4]
assert repr(header["HISTORY"]) == "1\n2\n3\n4"
header.add_history(0, after="A")
assert len(header) == 6
assert header.cards[1].value == 0
assert header["HISTORY"] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header(
[("A", "B", "C"), ("", 1), ("", 2), ("", 3), ("", "", ""), ("", "", "")]
)
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
        # that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[""] == [1, 2, 3, "", "", 4]
assert repr(header[""]) == "1\n2\n3\n\n\n4"
header.add_blank(0, after="A")
assert len(header) == 8
assert header.cards[1].value == 0
assert header[""] == [0, 1, 2, 3, "", "", 4]
header[""] = 5
header[" "] = 6
assert header[""] == [0, 1, 2, 3, "", "", 4, 5, 6]
assert header[" "] == [0, 1, 2, 3, "", "", 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
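        # FakeHeader mimics the minimal mapping interface (keys() plus item access)
        # that Header.update accepts in addition to real Header/dict objects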
header = fits.Header()
header.update({"FOO": ("BAR", "BAZ")})
header.update(FakeHeader([("A", 1), ("B", 2, "comment")]))
assert set(header.keys()) == {"FOO", "A", "B"}
assert header.comments["B"] == "comment"
# test that comments are preserved
tmphdr = fits.Header()
tmphdr["HELLO"] = (1, "this is a comment")
header.update(tmphdr)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO"}
assert header.comments["HELLO"] == "this is a comment"
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {"FOO", "A", "B", "HELLO", "NAXIS1", "NAXIS2"}
assert set(header.values()) == {"BAR", 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data("arange.fits"))
hdul[0].header.update({"FOO": ("BAR", "BAZ")})
assert hdul[0].header["FOO"] == "BAR"
assert hdul[0].header.comments["FOO"] == "BAZ"
with pytest.raises(ValueError):
hdul[0].header.update({"FOO2": ("BAR", "BAZ", "EXTRA")})
hdul.writeto(self.temp("test.fits"))
hdul.close()
hdul = fits.open(self.temp("test.fits"), mode="update")
hdul[0].header.comments["FOO"] = "QUX"
hdul.close()
hdul = fits.open(self.temp("test.fits"))
assert hdul[0].header.comments["FOO"] == "QUX"
hdul[0].header.add_comment(0, after="FOO")
assert str(hdul[0].header.cards[-1]).strip() == "COMMENT 0"
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad("HISTORY " + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad("COMMENT " + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value."
)
assert (
c.value == "card has no comments. "
"/ text after slash is still part of the value."
)
assert c.comment == ""
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card("", " / EXPOSURE INFORMATION")
assert str(c) == _pad(" / EXPOSURE INFORMATION")
c = fits.Card.fromstring(str(c))
assert c.keyword == ""
assert c.value == " / EXPOSURE INFORMATION"
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring("ABC = (8, 9)")
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring("abc = + 2.1 e + 12")
assert c.value == 2100000000000.0
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
        # fixable non-FSC: if the card is not parsable, its value will be
        # assumed to be a string and everything after the first slash will be
        # the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes / let's also try the comment"
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment "
)
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring("ABC = ")
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header["FOO"] = "BAR"
header["UNDEF"] = None
assert list(header.values()) == ["BAR", None]
assert list(header.items()) == [("FOO", "BAR"), ("UNDEF", None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring("XYZ= 100")
assert c.keyword == "XYZ"
assert c.value == 100
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = "Card 'ABC' is not FITS standard (equal sign not at column 8)"
err_text2 = "Card 'ABC' is not FITS standard (invalid value string: 'a6'"
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring("ABC= a6")
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify("fix")
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card(
"WHATEVER",
"SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_"
"03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY"
"_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml",
)
assert (
str(c)
== "WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' "
)
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1["TEST"] = "abcdefg" * 30
h2 = fits.Header()
h2["TEST"] = "abcdefg" * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header["TEST1"] = ("Regular value", "Regular comment")
header["TEST2"] = ("long string value " * 10, "long comment " * 10)
header["TEST3"] = ("Regular value", "Regular comment")
assert repr(header).splitlines() == [
str(fits.Card("TEST1", "Regular value", "Regular comment")),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card("TEST3", "Regular value", "Regular comment")),
]
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = "long string value " * 10
header = fits.Header()
header[""] = value
assert len(header) == 3
assert " ".join(header[""]) == value.rstrip()
# Ensure that this works like other commentary keywords
header["COMMENT"] = value
header["HISTORY"] = value
assert header["COMMENT"] == header["HISTORY"]
assert header["COMMENT"] == header[""]
def test_long_string_from_file(self):
c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
c = hdul[0].header.cards["abc"]
hdul.close()
assert (
str(c)
== "ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment "
)
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card("abc", "longstringvalue" * 10, "longcomment" * 10)
assert (
str(c)
== "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment "
)
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' / comments in line 1")
+ _pad(
"continue 'continue with long string but without the "
"ampersand at the end' /"
)
+ _pad(
"continue 'continue must have string value (with quotes)' "
"/ comments with ''. "
)
)
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert (
str(c)
== "ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. "
)
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad(
"EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'"
)
+ _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'")
+ _pad("CONTINUE '&' / pysyn expression")
)
assert c.keyword == "EXPR"
assert (
c.value == "/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits "
"* 5.87359e-12 * MWAvg(Av=0.12)"
)
assert c.comment == "pysyn expression"
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h["SVALUE"] = "A" * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card("TEST", "long value" * 10, "long comment &" * 10)
assert (
str(c)
== "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & "
)
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(
AstropyUserWarning, match="HIERARCH card will be created"
) as w:
c = fits.Card(
"ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert len(w) == 1
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
# Test manual creation of hierarch card
c = fits.Card("hierarch abcdefghi", 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card(
"HIERARCH ESO INS SLIT2 Y1FRML",
"ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
)
assert (
str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
)
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings."""
filename = fits.util.get_testdata_filepath("compressed_image.fits")
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring("HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
# Test also with creation via the Card constructor
c = fits.Card("HIERARCH key.META_4", "calFileVersion")
assert c.keyword == "key.META_4"
assert c.value == "calFileVersion"
assert c.comment == ""
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card("HIERARCH WeirdCard.~!@#_^$%&", "The value", "a comment")
# This should not raise any exceptions
c.verify("exception")
assert c.keyword == "WeirdCard.~!@#_^$%&"
assert c.value == "The value"
assert c.comment == "a comment"
# Test also the specific case from the original bug report
header = fits.Header(
[
("simple", True),
("BITPIX", 8),
("NAXIS", 0),
("EXTEND", True, "May contain datasets"),
("HIERARCH key.META_0", "detRow"),
]
)
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
header2 = hdul[0].header
assert str(header.cards[header.index("key.META_0")]) == str(
header2.cards[header2.index("key.META_0")]
)
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], "NAXIS")
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header["NAXIS"]
def test_hierarch_card_lookup(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
assert "abcdefghi" in header
assert header["abcdefghi"] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert "ABCDEFGHI" in header
def test_hierarch_card_delete(self):
header = fits.Header()
header["hierarch abcdefghi"] = 10
del header["hierarch abcdefghi"]
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header["abcdefghi"] = 10
header["abcdefgh"] = 10
header["abcdefg"] = 10
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header["abcdefghij"]
with pytest.warns(
fits.verify.VerifyWarning, match=r"greater than 8 characters"
):
header.insert(2, ("abcdefghij", 10))
del header[2]
assert list(header.keys())[2] == "abcdefg".upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLAH BLAH": "TESTA"})
assert len(w) == 0
assert "BLAH BLAH" in header
assert header["BLAH BLAH"] == "TESTA"
header.update({"HIERARCH BLAH BLAH": "TESTB"})
assert len(w) == 0
assert header["BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH": "TESTC"})
assert len(w) == 1
assert len(header) == 1
assert header["BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["blah blah"], "TESTD"
header.update({"blah blah": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["blah blah"], "TESTE"
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({"BLAH BLAH BLAH": "TESTA"})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({"HIERARCH BLAH BLAH BLAH": "TESTB"})
assert len(w) == 3
assert header["BLAH BLAH BLAH"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLAH BLAH BLAH": "TESTC"})
assert len(w) == 4
assert header["BLAH BLAH BLAH"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH blah blah blah": "TESTD"})
assert len(w) == 4
assert header["blah blah blah"], "TESTD"
header.update({"blah blah blah": "TESTE"})
assert len(w) == 5
assert header["blah blah blah"], "TESTE"
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = "a HIERARCH card will be created"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({"HIERARCH BLA BLA": "TESTA"})
assert len(w) == 0
assert "BLA BLA" in header
assert header["BLA BLA"] == "TESTA"
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 0
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 1
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 1
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTE"
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({"BLA BLA": "TESTA"})
print([x.category for x in w])
assert len(w) == 1
assert msg in str(w[0].message)
header.update({"HIERARCH BLA BLA": "TESTB"})
assert len(w) == 1
assert header["BLA BLA"], "TESTB"
# Update without explicitly stating 'HIERARCH':
header.update({"BLA BLA": "TESTC"})
assert len(w) == 2
assert header["BLA BLA"], "TESTC"
# Test case-insensitivity
header.update({"HIERARCH bla bla": "TESTD"})
assert len(w) == 2
assert len(header) == 1
assert header["bla bla"], "TESTD"
header.update({"bla bla": "TESTE"})
assert len(w) == 3
assert len(header) == 1
assert header["bla bla"], "TESTE"
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header["FOO"] = ("bar", "baz", "qux")
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header["FOO"] = ("BAR",)
header["FOO2"] = (None,)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == ""
assert header.comments["FOO"] == ""
def test_header_setitem_2tuple(self):
header = fits.Header()
header["FOO"] = ("BAR", "BAZ")
header["FOO2"] = (None, None)
assert header["FOO"] == "BAR"
assert header["FOO2"] is None
assert header[0] == "BAR"
assert header.comments[0] == "BAZ"
assert header.comments["FOO"] == "BAZ"
assert header.comments["FOO2"] == ""
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header["FOO"] = "BAR"
assert header["FOO"] == "BAR"
header["FOO"] = None
assert header["FOO"] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep="\n")
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header["UNDEF3"] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header["DEFINED"] == 42
assert header["UNDEF"] is None
assert header["UNDEF2"] is None
assert header["UNDEF3"] is None
assert header["UNDEF5"] is None
assert header["UNDEF6"] is None
# Assign an undefined value to a new card
header["UNDEF4"] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([("A", "B", "C")])
header.set("A", comment="D")
assert header["A"] == "B"
assert header.comments["A"] == "D"
def test_header_iter(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header) == ["A", "C"]
def test_header_slice(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
newheader = header[1:]
assert len(newheader) == 2
assert "A" not in newheader
assert "C" in newheader
assert "E" in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == "F"
assert newheader[1] == "D"
assert newheader[2] == "B"
newheader = header[::2]
assert len(newheader) == 2
assert "A" in newheader
assert "C" not in newheader
assert "E" in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = "GH"
assert header[1] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header[1:] = ["H", "I"]
assert header[1] == "H"
assert header[2] == "I"
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
del header[1:]
assert len(header) == 1
assert header[0] == "B"
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
newheader = header["AB*"]
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
        Regression test for an issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([("DATE", 1), ("DATE-OBS", 2), ("DATE-FOO", 3)])
assert len(header["DATE*"]) == 3
assert len(header["DATE?*"]) == 2
assert len(header["DATE-*"]) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header["AB*"] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header["AB*"] = "GH"
assert header[0] == "GH"
assert header[2] == "GH"
# Now assign via an iterable
header["AB*"] = ["H", "I"]
assert header[0] == "H"
assert header[2] == "I"
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)])
del header["AB*"]
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header(
[
("ABC", 0),
("HISTORY", 1),
("HISTORY", 2),
("DEF", 3),
("HISTORY", 4),
("HISTORY", 5),
]
)
assert header["HISTORY"] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([("A", "B"), ("C", "D")])
header.clear()
assert "A" not in header
assert "C" not in header
assert len(header) == 0
@pytest.mark.parametrize("fitsext", [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header["FOO"] = "BAR"
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp("temp.fits"), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(["A", "B"])
assert "A" in header
assert header["A"] is None
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] is None
assert header.comments["B"] == ""
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(["A", "B"], "C")
assert "A" in header
assert header["A"] == "C"
assert header.comments["A"] == ""
assert "B" in header
assert header["B"] == "C"
assert header.comments["B"] == ""
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(["A"], ("B", "C"))
assert "A" in header
assert header["A"] == "B"
assert header.comments["A"] == "C"
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(["A", "B", "A"], "C")
assert "A" in header
assert ("A", 0) in header
assert ("A", 1) in header
assert ("A", 2) not in header
assert header[0] == "C"
assert header["A"] == "C"
assert header[("A", 0)] == "C"
assert header[2] == "C"
assert header[("A", 1)] == "C"
def test_header_items(self):
header = fits.Header([("A", "B"), ("C", "D")])
assert list(header.items()) == [("A", "B"), ("C", "D")]
def test_header_iterkeys(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([("A", "B"), ("C", "D")])
for a, b in zip(header.values(), ["B", "D"]):
assert a == b
def test_header_keys(self):
with fits.open(self.data("arange.fits")) as hdul:
assert list(hdul[0].header) == [
"SIMPLE",
"BITPIX",
"NAXIS",
"NAXIS1",
"NAXIS2",
"NAXIS3",
"EXTEND",
]
def test_header_list_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
last = header.pop()
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop(1)
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop(0)
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
pytest.raises(TypeError, header.pop, "A", "B", "C")
last = header.pop("G")
assert last == "H"
assert len(header) == 3
assert list(header) == ["A", "C", "E"]
mid = header.pop("C")
assert mid == "D"
assert len(header) == 2
assert list(header) == ["A", "E"]
first = header.pop("A")
assert first == "B"
assert len(header) == 1
assert list(header) == ["E"]
default = header.pop("X", "Y")
assert default == "Y"
assert len(header) == 1
pytest.raises(KeyError, header.pop, "X")
def test_popitem(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.setdefault("A") == "B"
assert header.setdefault("C") == "D"
assert header.setdefault("E") == "F"
assert len(header) == 3
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
assert "G" in header
assert header.setdefault("G", "H") == "H"
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update({"A": "E", "F": "G"})
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([("A", "B"), ("C", "D")])
header.update(A="E", F="G")
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.update([("A", "E"), fits.Card("F", "G")])
assert header["A"] == "E"
assert header[0] == "E"
assert "F" in header
assert header["F"] == "G"
assert header[-1] == "G"
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header["MYKEY"] = ("some val", "some comment")
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == "XTENSION"
assert hdu.header[-1] == "some val"
assert ("MYKEY", 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == "some val"
assert hdu.header[-1] == "some other val"
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == "some val"
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu2.header["MYKEY"] = ("some other val", "some other comment")
hdu2.header["HISTORY"] = "history 1"
hdu2.header["HISTORY"] = "history 2"
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) in hdu.header
assert hdu.header[("MYKEY", 1)] == "some other val"
assert len(hdu.header["HISTORY"]) == 3
assert hdu.header[-1] == "history 2"
hdu = fits.PrimaryHDU()
hdu.header["MYKEY"] = ("some val", "some comment")
hdu.header["HISTORY"] = "history 1"
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ("MYKEY", 0) in hdu.header
assert ("MYKEY", 1) not in hdu.header
assert hdu.header["MYKEY"] == "some other val"
assert len(hdu.header["HISTORY"]) == 2
assert hdu.header[-1] == "history 2"
def test_header_extend_update_commentary(self):
"""
        Test extending header with and without update=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ["", "COMMENT", "HISTORY"]:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = "My text"
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data("test0.fits"))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
assert header.count("A") == 1
assert header.count("C") == 1
assert header.count("E") == 1
header["HISTORY"] = "a"
header["HISTORY"] = "b"
assert header.count("HISTORY") == 2
pytest.raises(KeyError, header.count, "G")
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ""
assert header[-2] == ""
# New card should fill the first blank by default
header.append(("E", "F"))
assert len(header) == 4
assert header[-2] == "F"
assert header[-1] == ""
# This card should not use up a blank spot
header.append(("G", "H"), useblanks=False)
assert len(header) == 5
assert header[-1] == ""
assert header[-2] == "H"
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([("A", "B"), ("C", "D")])
header.append("E")
assert len(header) == 3
assert list(header)[-1] == "E"
assert header[-1] is None
assert header.comments["E"] == ""
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append("")
assert len(header) == 4
assert list(header)[-1] == ""
assert header[""] == ""
assert header.comments[""] == ""
def test_header_insert_use_blanks(self):
header = fits.Header([("A", "B"), ("C", "D")])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ("E", "F"))
assert len(header) == 4
assert header[1] == "F"
assert header[-1] == ""
assert header[-2] == "D"
# Insert a new card without using blanks
header.insert(1, ("G", "H"), useblanks=False)
assert len(header) == 5
assert header[1] == "H"
assert header[-1] == ""
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header(
[("NAXIS1", 10), ("COMMENT", "Comment 1"), ("COMMENT", "Comment 3")]
)
header.insert("NAXIS1", ("NAXIS", 2, "Number of axes"))
assert list(header.keys())[0] == "NAXIS"
assert header[0] == 2
assert header.comments[0] == "Number of axes"
header.insert("NAXIS1", ("NAXIS2", 20), after=True)
assert list(header.keys())[1] == "NAXIS1"
assert list(header.keys())[2] == "NAXIS2"
assert header[2] == 20
header.insert(("COMMENT", 1), ("COMMENT", "Comment 2"))
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3"]
header.insert(("COMMENT", 2), ("COMMENT", "Comment 4"), after=True)
assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3", "Comment 4"]
header.insert(-1, ("TEST1", True))
assert list(header.keys())[-2] == "TEST1"
header.insert(-1, ("TEST2", True), after=True)
assert list(header.keys())[-1] == "TEST2"
assert list(header.keys())[-3] == "TEST1"
def test_remove(self):
header = fits.Header([("A", "B"), ("C", "D")])
# When keyword is present in the header it should be removed.
header.remove("C")
assert len(header) == 1
assert list(header) == ["A"]
assert "C" not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove("F")
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove("F", ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([("A", "B"), ("C", "D"), ("A", "F")])
header.remove("A", remove_all=True)
assert "A" not in header
assert len(header) == 1
assert list(header) == ["C"]
assert header[0] == "D"
def test_header_comments(self):
header = fits.Header([("A", "B", "C"), ("DEF", "G", "H")])
assert repr(header.comments) == " A C\n DEF H"
def test_comment_slices_and_filters(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
s = header.comments[1:]
assert list(s) == ["H", "K"]
s = header.comments[::-1]
assert list(s) == ["K", "H", "D"]
s = header.comments["A*"]
assert list(s) == ["D", "K"]
def test_comment_slice_filter_assign(self):
header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
header.comments[1:] = "L"
assert list(header.comments) == ["D", "L", "L"]
assert header.cards[header.index("AB")].comment == "D"
assert header.cards[header.index("EF")].comment == "L"
assert header.cards[header.index("AI")].comment == "L"
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ["L", "L", "D"]
header.comments["A*"] = ["M", "N"]
assert list(header.comments) == ["M", "L", "N"]
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header["HISTORY"] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header["HISTORY"][1:] == indices[1:]
assert header["HISTORY"][:3] == indices[:3]
assert header["HISTORY"][:6] == indices[:6]
assert header["HISTORY"][:-2] == indices[:-2]
assert header["HISTORY"][::-1] == indices[::-1]
assert header["HISTORY"][1::-1] == indices[1::-1]
assert header["HISTORY"][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ("A", "B", "C"))
header.append(("D", "E", "F"), end=True)
assert list(header["HISTORY"][1:]) == indices[1:]
assert list(header["HISTORY"][:3]) == indices[:3]
assert list(header["HISTORY"][:6]) == indices[:6]
assert list(header["HISTORY"][:-2]) == indices[:-2]
assert list(header["HISTORY"][::-1]) == indices[::-1]
assert list(header["HISTORY"][1::-1]) == indices[1::-1]
assert list(header["HISTORY"][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header["FOO"] = "BAR"
header["HISTORY"] = "ABC"
header["FRED"] = "BARNEY"
header["HISTORY"] = "DEF"
header["HISTORY"] = "GHI"
assert header["HISTORY"] == ["ABC", "DEF", "GHI"]
# Single value update
header["HISTORY"][0] = "FOO"
assert header["HISTORY"] == ["FOO", "DEF", "GHI"]
# Single value partial slice update
header["HISTORY"][1:] = "BAR"
assert header["HISTORY"] == ["FOO", "BAR", "BAR"]
# Multi-value update
header["HISTORY"][:] = ["BAZ", "QUX"]
assert header["HISTORY"] == ["BAZ", "QUX", "BAR"]
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header["HISTORY"] = "hello world"
header["HISTORY"] = "hello world"
header["COMMENT"] = "hello world"
assert header["HISTORY"] != header["COMMENT"]
header["COMMENT"] = "hello world"
assert header["HISTORY"] == header["COMMENT"]
def test_long_commentary_card(self):
header = fits.Header()
header["FOO"] = "BAR"
header["BAZ"] = "QUX"
longval = "ABC" * 30
header["HISTORY"] = longval
header["FRED"] = "BARNEY"
header["HISTORY"] = longval
assert len(header) == 7
assert list(header)[2] == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.set("HISTORY", longval, after="FOO")
assert len(header) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
header = fits.Header()
header.update({"FOO": "BAR"})
header.update({"BAZ": "QUX"})
longval = "ABC" * 30
header.add_history(longval)
header.update({"FRED": "BARNEY"})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == "FRED"
assert str(header.cards[3]) == "HISTORY " + longval[:72]
assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
header.add_history(longval, after="FOO")
assert len(header.cards) == 9
assert str(header.cards[1]) == "HISTORY " + longval[:72]
assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
def test_totxtfile(self, home_is_temp):
header_filename = self.temp("header.txt")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.totextfile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.totextfile(header_filename, overwrite=False)
hdul[0].header.totextfile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromtextfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_tofile(self, home_is_temp):
"""
Repeat test_totxtfile, but with tofile()
"""
header_filename = self.temp("header.fits")
with fits.open(self.data("test0.fits")) as hdul:
hdul[0].header.tofile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.tofile(header_filename, overwrite=False)
hdul[0].header.tofile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename), update=True, update_first=True
)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp("test.fits"), output_verify="ignore")
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({"MYKEY": "FOO"})
hdu.header.extend(
hdu.header.fromfile(header_filename),
update=True,
update_first=True,
strip=False,
)
assert "MYKEY" in hdu.header
assert "EXTENSION" not in hdu.header
assert "SIMPLE" in hdu.header
hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
assert len(hdul2) == 2
assert "MYKEY" in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711"""
filename = self.data("scale.fits")
hdr = fits.Header.fromfile(filename)
assert hdr["DATASET"] == "2MASS"
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header["A"] = ("B", "C")
header["B"] = ("C", "D")
header["C"] = ("D", "E")
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp("test.hdr"), "w") as f:
f.write("\n".join(str(c).strip() for c in header.cards))
f.write("\nEND")
new_header = fits.Header.fromtextfile(self.temp("test.hdr"))
assert "END" not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, "END", "")
pytest.raises(ValueError, header.append, "END")
pytest.raises(ValueError, header.append, "END", end=True)
pytest.raises(ValueError, header.insert, len(header), "END")
pytest.raises(ValueError, header.set, "END")
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
        and looks like it was intended to be the END card, we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep="", endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += " " * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header("END =", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header("END = ", True)
with pytest.warns(
AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header("END$%&%^*%*", True)
with pytest.warns(
AstropyUserWarning,
match=r"Unexpected bytes trailing END keyword: '\$%&%\^\*%\*'",
) as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header("END", False)
with pytest.warns(
AstropyUserWarning, match="Missing padding to end of the FITS block"
) as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h["FOO"] = "BAR"
h["COMMENT"] = "hello"
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
out = f.read()
out = out.replace(b"hello", "héllo".encode("latin1"))
out = out.replace(b"BAR", "BÀR".encode("latin1"))
with open(self.temp("test2.fits"), "wb") as f2:
f2.write(out)
with pytest.warns(
AstropyUserWarning,
match="non-ASCII characters are present in the FITS file",
) as w:
h = fits.getheader(self.temp("test2.fits"))
assert h["FOO"] == "B?R"
assert h["COMMENT"] == "h?llo"
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([("A", "B"), ("B", "C"), ("C", "D")])
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after=0)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before="C")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", after="A")
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("B", before=2)
assert list(header) == ["A", "B", "C"]
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set("C", before=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
header.set("C", after=123)
assert list(header) == ["A", "B", "C"]
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep="\n")
# First the case that *does* work prior to fixing this issue
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep="\n")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert h.cards["FOCALLEN"]._modified
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
assert h.cards["APERTURE"]._modified
assert h["FOCALLEN"] == 155.0
assert h["APERTURE"] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h["FOCALLEN"] = 155.0
h["APERTURE"] = 0.0
assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header["TEST"] = 5.0022221e-07
hdu.writeto(self.temp("test.fits"))
# Here we manually make the file invalid
with open(self.temp("test.fits"), "rb+") as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii("e"))
with fits.open(self.temp("test.fits")) as hdul, pytest.warns(
AstropyUserWarning
) as w:
hdul.writeto(self.temp("temp.fits"), output_verify="warn")
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
python (for now Astropy will still maintain the leading zeros if now
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad("FOO = T")
barimg = _pad("BAR = F")
h = fits.Header()
h["FOO"] = True
h["BAR"] = False
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h["FOO"] = np.bool_(True)
h["BAR"] = np.bool_(False)
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h["FOO"] is True
assert h["BAR"] is False
assert str(h.cards["FOO"]) == fooimg
assert str(h.cards["BAR"]) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([("abC", 1), ("Def", 2), ("GeH", 3)])
assert list(h) == ["ABC", "DEF", "GEH"]
assert "abc" in h
assert "dEf" in h
assert h["geh"] == 3
# Case insensitivity of wildcards
assert len(h["g*"]) == 1
h["aBc"] = 2
assert h["abc"] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h["gEh"]
assert list(h) == ["ABC", "DEF"]
assert len(h) == 2
assert h.get("def") == 2
h.set("Abc", 3)
assert h["ABC"] == 3
h.set("gEh", 3, before="Abc")
assert list(h) == ["GEH", "ABC", "DEF"]
assert h.pop("abC") == 3
assert len(h) == 2
assert h.setdefault("def", 3) == 2
assert len(h) == 2
assert h.setdefault("aBc", 1) == 1
assert len(h) == 3
assert list(h) == ["GEH", "DEF", "ABC"]
h.update({"GeH": 1, "iJk": 4})
assert len(h) == 4
assert list(h) == ["GEH", "DEF", "ABC", "IJK"]
assert h["GEH"] == 1
assert h.count("ijk") == 1
assert h.index("ijk") == 3
h.remove("Def")
assert len(h) == 3
assert list(h) == ["GEH", "ABC", "IJK"]
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header["TESTKW"] = ("Test val", "This is the END")
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp("test.hdr"))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = "\u30a8\u30ea\u30c3\u30af"
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h["FOO"] = "BAR"
assert "FOO" in h
assert h["FOO"] == "BAR"
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, "BAR")
h["FOO"] = "BAZ"
assert h["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, "FOO", erikku)
h["FOO"] = ("BAR", "BAZ")
assert h["FOO"] == "BAR"
assert h.comments["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, "FOO", ("BAR", erikku))
pytest.raises(ValueError, assign, "FOO", (erikku, "BAZ"))
pytest.raises(ValueError, assign, "FOO", (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
        While test_assign_unicode ensures that `str` objects containing
        non-ASCII characters cannot be assigned to headers, it should not be
        possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set("TEST", b"Hello")
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
        for its solution: optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h["FOO"] = "Bar "
assert h["FOO"] == "Bar"
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp("strip_header_whitespace", False):
assert h["FOO"] == "Bar "
assert h["QUX"] == "Bar "
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
assert h["FOO"] == "Bar"
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = [
"CCD parameters table ...",
" reference table oref$n951041ko_ccd.fits",
" INFLIGHT 12/07/2001 25/02/2002",
" all bias frames",
] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header["HISTORY"] = item
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header["HISTORY"] == hdu.header["HISTORY"]
new_hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring("CLFIND2D: contour = 0.30")
c2 = fits.Card.fromstring("Just some random text.")
c3 = fits.Card.fromstring("A" * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert "CLFIND2D" in header
assert "Just som" in header
assert "AAAAAAAA" in header
assert header["CLFIND2D"] == ": contour = 0.30"
assert header["Just som"] == "e random text."
assert header["AAAAAAAA"] == "A" * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, "CLFIND2D", "foo")
pytest.raises(ValueError, header.set, "Just som", "foo")
pytest.raises(ValueError, header.set, "AAAAAAAA", "foo")
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring("HIERARCH ESO DET CHIP PXSPACE = 5e6")
with pytest.warns(
fits.verify.VerifyWarning, match=r"Verification reported errors"
):
c.verify("fix")
assert str(c) == _pad("HIERARCH ESO DET CHIP PXSPACE = 5E6")
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, "TEST", float("nan"))
pytest.raises(ValueError, h.set, "TEST", np.nan)
pytest.raises(ValueError, h.set, "TEST", np.float32("nan"))
pytest.raises(ValueError, h.set, "TEST", float("inf"))
pytest.raises(ValueError, h.set, "TEST", np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
        could not be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([("TEST", True)])
h["TEST"] = 1
assert h["TEST"] is not True
assert isinstance(h["TEST"], int)
assert h["TEST"] == 1
h["TEST"] = np.bool_(True)
assert h["TEST"] is True
h["TEST"] = False
assert h["TEST"] is False
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
h["TEST"] = 0
assert h["TEST"] is not False
assert isinstance(h["TEST"], int)
assert h["TEST"] == 0
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h["TEST"] = 1
# int -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# int -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# Now the same tests but with zeros
h["TEST"] = 0
# int -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
# int -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, "HISTORY", "\n")
pytest.raises(ValueError, h.set, "HISTORY", "\nabc")
pytest.raises(ValueError, h.set, "HISTORY", "abc\n")
pytest.raises(ValueError, h.set, "HISTORY", "abc\ndef")
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if "\n" in card_image:
pytest.raises(fits.VerifyError, c.verify, "exception")
else:
c.verify("exception")
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
        header with Header.append (as opposed to assigning to hdr['HISTORY']),
        it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = "abc" * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(("history", value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == "HISTORY" and val == value
# Try writing adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data("test0.fits"), "rb") as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data("test0.fits"))
assert pri_hdr["NAXIS"] == pri_hdr_from_bytes["NAXIS"]
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr["KEY2 "] = 2
hdr["KEY2 "] = 4
assert len(hdr) == 1
assert hdr["KEY2"] == 4
assert hdr["KEY2 "] == 4
def test_strip(self):
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr.strip()
assert set(hdr) == {"HISTORY", "FOO"}
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr = hdr.copy(strip=True)
assert set(hdr) == {"HISTORY", "FOO"}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring("KW = INF / Comment")
card.value = "FIXED"
assert tuple(card) == ("KW", "FIXED", "Comment")
card.verify("fix")
assert tuple(card) == ("KW", "FIXED", "Comment")
card = fits.Card.fromstring("KW = INF")
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp("bogus.fits"))
with fits.open(self.temp("bogus.fits")) as hdul:
hdul[0].header["KW"] = -1
hdul.writeto(self.temp("bogus_fixed.fits"))
with fits.open(self.temp("bogus_fixed.fits")) as hdul:
assert hdul[0].header["KW"] == -1
def test_index_numpy_int(self):
header = fits.Header([("A", "FOO"), ("B", 2), ("C", "BAR")])
idx = np.int8(2)
assert header[idx] == "BAR"
header[idx] = "BAZ"
assert header[idx] == "BAZ"
header.insert(idx, ("D", 42))
assert header[idx] == 42
header.add_comment("HELLO")
header.add_comment("WORLD")
assert header["COMMENT"][np.int64(1)] == "WORLD"
header.append(("C", "BAZBAZ"))
assert header[("C", np.int16(0))] == "BAZ"
assert header[("C", np.uint32(1))] == "BAZBAZ"
def test_header_data_size(self):
"""
Tests data size calculation (w/o padding) given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header["BITPIX"] = 32
header["NAXIS"] = 2
header["NAXIS1"] = 100
header["NAXIS2"] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
    which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup_method(self):
super().setup_method()
self._test_header = fits.Header()
self._test_header.set("DP1", "NAXIS: 2")
self._test_header.set("DP1", "AXIS.1: 1")
self._test_header.set("DP1", "AXIS.2: 2")
self._test_header.set("DP1", "NAUX: 2")
self._test_header.set("DP1", "AUX.1.COEFF.0: 0")
self._test_header.set("DP1", "AUX.1.POWER.0: 1")
self._test_header.set("DP1", "AUX.1.COEFF.1: 0.00048828125")
self._test_header.set("DP1", "AUX.1.POWER.1: 1")
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
assert c.comment == "A comment"
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.1
assert c.field_specifier == "NAXIS"
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1", "NAXIS: 2")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: 2.0")
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1", "NAXIS: a")
assert c.keyword == "DP1"
assert c.value == "NAXIS: a"
assert c.field_specifier is None
c = fits.Card("DP1.NAXIS", 2)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.field_specifier == "NAXIS"
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card("DP1.NAXIS", "a")
assert c.keyword == "DP1.NAXIS"
assert c.value == "a"
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
        Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
assert c.keyword == "DP1.NAXIS"
assert c.value == 2.0
assert c.comment == "A comment"
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == "NAXIS"
c.field_specifier = "NAXIS1"
assert c.field_specifier == "NAXIS1"
assert c.keyword == "DP1.NAXIS1"
assert c.value == 2.0
assert c.comment == "A comment"
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
    def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set("abc.def", 1)
header.set("abc.DEF", 2)
assert header["abc.def"] == 1
assert header["ABC.def"] == 1
assert header["aBc.def"] == 1
assert header["ABC.DEF"] == 2
assert "ABC.dEf" not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
        https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184 -- previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header["DP1"] == "NAXIS: 2"
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header["DP1.NAXIS"] == 2.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
assert self._test_header["DP1.AUX.1.COEFF.1"] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header["DP1.AXIS.3"]
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header["DP1.NAXIS"] == 3.0
assert isinstance(self._test_header["DP1.NAXIS"], float)
self._test_header["DP1.AXIS.1"] = 1.1
assert self._test_header["DP1.AXIS.1"] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h["D2IM1.EXTVER"] = 1
assert h["D2IM1.EXTVER"] == 1.0
h["D2IM1.EXTVER"] = 2
assert h["D2IM1.EXTVER"] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2"
c = fits.Card("DP1.NAXIS", 2)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
c = fits.Card("DP1.NAXIS", 2.0)
assert c.rawkeyword == "DP1"
assert c.rawvalue == "NAXIS: 2.0"
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set("DP1", "AXIS.3: 1", "a comment", after="DP1.AXIS.2")
assert self._test_header[3] == 1
assert self._test_header["DP1.AXIS.3"] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
        keyword/field-specifier combination.
"""
del self._test_header["DP1.AXIS.1"]
assert len(self._test_header) == 7
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.AXIS.2"
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header["DP1.AXIS.2"]
assert len(self._test_header) == 6
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header["DP1.AXIS.*"]
assert isinstance(cl, fits.Header)
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
cl = self._test_header["DP1.N*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'",
]
cl = self._test_header["DP1.AUX..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl = self._test_header["DP?.NAXIS"]
assert [str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'"]
cl = self._test_header["DP1.A*S.*"]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
]
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header["DP1.A*..."]
assert len(self._test_header) == 2
assert list(self._test_header)[0] == "DP1.NAXIS"
assert self._test_header[0] == 2
assert list(self._test_header)[1] == "DP1.NAUX"
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header["DP1.A*..."]
assert [str(c).strip() for c in cl.cards] == [
"DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
cl2 = cl["*.*AUX..."]
assert [str(c).strip() for c in cl2.cards] == [
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'",
]
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl) == ["DP1.AXIS.1", "DP1.AXIS.2"]
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header["DP1.AXIS.*"]
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header["DP1.AXIS.*"]
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h["HISTORY"] = "AXIS.1: 2"
h["HISTORY"] = "AXIS.2: 2"
assert "HISTORY.AXIS" not in h
assert "HISTORY.AXIS.1" not in h
assert "HISTORY.AXIS.2" not in h
assert h["HISTORY"] == ["AXIS.1: 2", "AXIS.2: 2"]
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h["HISTORY"] = "Date: 2012-09-19T13:58:53.756061"
assert "HISTORY.Date" not in h
assert str(h.cards[0]) == _pad("HISTORY Date: 2012-09-19T13:58:53.756061")
c = fits.Card.fromstring(" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ""
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h["FOO"] = "Date: 2012-09-19T13:58:53.756061"
assert "FOO.Date" not in h
assert str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'")
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h["FOO"] == "AXIS.1: 2"
assert h[("FOO", 1)] == "AXIS.2: 4"
assert h["FOO.AXIS.1"] == 2.0
assert h["FOO.AXIS.2"] == 4.0
assert "FOO.AXIS" not in h
assert "FOO.AXIS." not in h
assert "FOO." not in h
pytest.raises(KeyError, lambda: h["FOO.AXIS"])
pytest.raises(KeyError, lambda: h["FOO.AXIS."])
pytest.raises(KeyError, lambda: h["FOO."])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
        # Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data("zerowidth.fits"))
output = hf.parse(extensions=["AIPS FQ"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split("\n")) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1], keywords=["EXTNAME", "BITPIX"])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split("\n")) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=["NAXIS*"])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
        # Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data("test0.fits"))
assert "EXTNAME = 'SCI" in hf.parse(extensions=["SCI,2"])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data("comp.fits"))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1], compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1], compressed=True)
hf.close()
def test_fitsheader_compressed_from_primary_image_ext(self):
"""Regression test for issue https://github.com/astropy/astropy/issues/7312"""
data = np.arange(2 * 2, dtype=np.int8).reshape((2, 2))
phdu = fits.PrimaryHDU(data=data)
chdu = fits.CompImageHDU(data=phdu.data, header=phdu.header)
chdu.writeto(self.temp("tmp2.fits"), overwrite=True)
with fits.open(self.temp("tmp2.fits")) as hdul:
assert "XTENSION" not in hdul[1].header
assert "PCOUNT" not in hdul[1].header
assert "GCOUNT" not in hdul[1].header
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data("zerowidth.fits")
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=["AIPS FQ", 2, "4"])
assert len(mytable) == (
len(fitsobj["AIPS FQ"].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header)
)
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=["AIPS FQ"])
assert np.all(mytable["filename"] == test_filename)
assert np.all(mytable["hdu"] == "AIPS FQ")
assert mytable["value"][mytable["keyword"] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"])
assert len(mytable) == 1
assert mytable["hdu"][0] == "AIPS FQ"
assert mytable["keyword"][0] == "EXTNAME"
assert mytable["value"][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=["DOES_NOT_EXIST"])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["DOES_NOT_EXIST"])
assert mytable is None
formatter.close()
@pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"])
def test_hdu_writeto_mode(self, mode):
with open(self.temp("mode.fits"), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ("no comment",)
return super().append(card, *args, **kwargs)
my_header = MyHeader(
(
("a", 1.0, "first"),
("b", 2.0, "second"),
(
"c",
3.0,
),
)
)
assert my_header.comments["a"] == "first"
assert my_header.comments["b"] == "second"
assert my_header.comments["c"] == "no comment"
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments["b"] == "second"
assert slice_.comments["c"] == "no comment"
selection = my_header["c*"]
assert type(selection) is MyHeader
assert selection.comments["c"] == "no comment"
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments["b"] == "second"
assert copy_.comments["c"] == "no comment"
my_header.extend((("d", 4.0),))
assert my_header.comments["d"] == "no comment"
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
from io import BytesIO
from itertools import product
import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra.numpy import basic_indices
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.io.fits.hdu.compressed import (
COMPRESSION_TYPES,
DITHER_SEED_CHECKSUM,
SUBTRACTIVE_DITHER_1,
)
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
from .test_table import comparerecords
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = fits.ImageHDU(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = fits.ImageHDU(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header["EXTVER"] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert "EXTVER" not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
        # Passing ver to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr["EXTVER"] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr["FILENAME"] = "labq01i3q_rawtag.fits"
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert phdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data("test0.fits")) # readonly
        # data parts are lazily instantiated, so if we close the HDUList
        # without touching the data, the data cannot be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def test_open_2(self):
r = fits.open(self.data("test0.fits"))
info = [(0, "PRIMARY", 1, "PrimaryHDU", 138, (), "", "")] + [
(x, "SCI", x, "ImageHDU", 61, (40, 40), "int16", "") for x in range(1, 5)
]
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data("test0.fits"))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data("test0.fits"), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == "list index out of range"
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data("test0.fits"))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp("a_str.fits")
bfits = self.temp("b_str.fits")
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp("a_fileobj.fits")
bbfits = self.temp("b_fileobj.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp("a_str_slice.fits")
bfits = self.temp("b_str_slice.fits")
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp("a_fileobj_slice.fits")
bbfits = self.temp("b_fileobj_slice.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([("EXTNAME", "XPRIMARY"), ("EXTVER", 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert "EXTNAME" in hdul[0].header
assert hdul[0].name == "XPRIMARY"
assert hdul[0].name == hdul[0].header["EXTNAME"]
info = [(0, "XPRIMARY", 1, "PrimaryHDU", 5, (), "", "")]
assert hdul.info(output=False) == info
assert hdul["PRIMARY"] is hdul["XPRIMARY"]
assert hdul["PRIMARY"] is hdul[("XPRIMARY", 1)]
hdul[0].name = "XPRIMARY2"
assert hdul[0].header["EXTNAME"] == "XPRIMARY2"
hdul.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].name == "XPRIMARY2"
def test_io_manipulation(self):
        # Get a keyword value. An extension can be referred to by name or by
        # number. Both extension and keyword names are case insensitive.
with fits.open(self.data("test0.fits")) as r:
assert r["primary"].header["naxis"] == 0
assert r[0].header["naxis"] == 0
            # If there is more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r["sci", 1].header["detector"] == 1
# append (using "update()") a new card
r[0].header["xxx"] = 1.234e56
assert (
"\n".join(str(x) for x in r[0].header.cards[-3:])
== "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 "
)
# rename a keyword
r[0].header.rename_keyword("filename", "fname")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "history")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "simple")
r[0].header.rename_keyword("fname", "filename")
# get a subsection of data
assert np.array_equal(
r[2].data[:3, :3],
np.array(
[[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16
),
)
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp("test_new.fits"), mode="append") as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
                # and can be operated on further.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
# When the file is closed, the most recent additions of
# extension(s) since last flush() will be appended, but any HDU
                # that already existed at the last flush will not be modified
del n
# If an existing file is opened with "append" mode, like the
            # readonly mode, the HDUs will be read into the HDUList, which can
            # be modified in memory but cannot be written back to the original
            # file. A file opened with append mode can only add new HDUs.
os.rename(self.temp("test_new.fits"), self.temp("test_append.fits"))
with fits.open(self.temp("test_append.fits"), mode="append") as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp("test_append.fits"), self.temp("test_update.fits"))
with fits.open(self.temp("test_update.fits"), mode="update") as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header["rootname"] == "U2EQ0201T"
u[0].header["rootname"] = "abc"
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
# If the changes affect the size structure, e.g. adding or
                # deleting HDU(s), expanding or reducing the header beyond the
                # existing number of blocks (2880 bytes per block), or changing
                # the data size, the HDUList is written to a temporary
                # file, the original file is deleted, and the temporary file is
                # renamed to the original file name and reopened in the update
                # mode. To a user, these two kinds of update write-back look
                # the same, unless the optional argument in flush or
# close is set to 1.
del u[2]
u.flush()
                # The writeto method of HDUList writes the current HDUList,
                # with all changes made up to now, to a new file. This method
                # works the same regardless of the mode the HDUList was opened
# with.
u.append(r[3])
u.writeto(self.temp("test_new.fits"))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data("test0.fits")) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name="SCI")
assert np.array_equal(
hdu.data,
np.array(
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
],
dtype=np.float32,
),
)
# create an HDU with header and data
# notice that the header has the right NAXIS's since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype="int32"))
assert (
"\n".join(str(x) for x in hdu2.header.cards[1:5])
== "BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters "
)
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data("test0.fits"), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
        # make a defective HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with pytest.warns(
AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\."
) as w:
hdu.verify()
assert len(w) == 3
with pytest.warns(
AstropyUserWarning,
match=r"HDUList's 0th element is not a primary HDU\. "
r"Fixed by inserting one as 0th HDU\.",
) as w:
hdu.writeto(self.temp("test_new2.fits"), "fix")
assert len(w) == 3
def test_section(self):
# section testing
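        # The .section interface reads only the requested subset of the data
        # from disk, so it can be compared element-by-element against plain
        # indexing of the fully loaded .data array.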
fs = fits.open(self.data("arange.fits"))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]),
)
assert np.array_equal(
fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362])
)
assert np.array_equal(
fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359])
)
assert np.array_equal(
fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359])
)
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array(
[
[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384],
]
),
)
assert np.array_equal(
fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]]),
)
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(
fs[0].section[3:6, :, :][:3, :3, :3],
np.array(
[
[[330, 331, 332], [341, 342, 343], [352, 353, 354]],
[[440, 441, 442], [451, 452, 453], [462, 463, 464]],
[[550, 551, 552], [561, 562, 563], [572, 573, 574]],
]
),
)
assert np.array_equal(
fs[0].section[:, :, :][:3, :2, :2],
np.array(
[[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]]
),
)
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3])
bool_index = np.array(
[True, False, True, True, False, False, True, True, False, True]
)
assert np.array_equal(fs[0].section[:, bool_index, :], dat[:, bool_index, :])
assert np.array_equal(fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3])
# Can we use negative indices?
assert np.array_equal(fs[0].section[-1], dat[-1])
assert np.array_equal(fs[0].section[-9:-7], dat[-9:-7])
assert np.array_equal(fs[0].section[-4, -6:-3, -1], dat[-4, -6:-3, -1])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
slice(2, 3),
]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
]:
for idx3 in [
slice(0, 1),
slice(0, 2),
slice(0, 3),
slice(1, 2),
slice(1, 3),
slice(2, 3),
]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data("scale.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data("scale.fits"))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
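        # With do_not_scale_image_data=True the raw on-disk integers are
        # returned; by default BSCALE/BZERO are applied, promoting the data
        # to float32.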
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype(">i2")
with fits.open(self.data("scale.fits")) as hdul:
assert hdul[0].data.dtype == np.dtype("float32")
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp("test_new.fits"), data=np.array([], dtype="uint8"))
d = np.zeros([100, 100]).astype("uint16")
fits.append(self.temp("test_new.fits"), data=d)
with fits.open(self.temp("test_new.fits"), uint=True) as f:
assert f[1].data.dtype == "uint16"
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type="uint8", bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2**int_size) - 1
if int_size == 64:
                max_uint = np.uint64(max_uint)
dtype = f"uint{int_size}"
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
filename = f"uint{int_size}.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in new_uint_hdu.header
assert new_uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
@pytest.mark.parametrize(("from_file"), (False, True))
@pytest.mark.parametrize(("do_not_scale"), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(
self, from_file, do_not_scale
):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype="uint16")
if from_file:
# To generate the proper input file we always want to scale the
            # data before writing it... otherwise when we open it, it will be
# regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = "unsigned_int.fits"
tmp_uint.writeto(self.temp(filename))
with fits.open(
self.temp(filename), do_not_scale_image_data=do_not_scale
) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr, do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert "BSCALE" in uint_hdu.header
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BSCALE"] == 1
assert uint_hdu.header["BZERO"] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header["BITPIX"] < 0
# BSCALE and BZERO should NOT be in header any more.
assert "BSCALE" not in uint_hdu.header
assert "BZERO" not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = "test_uint_to_float.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
        NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header["BLANK"] = 999
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header["BLANK"] = 2
with pytest.warns(
AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
) as w:
hdu.writeto(self.temp("test_new.fits"))
# Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not
            # of integer type
assert len(w) == 1
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with pytest.warns(
AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
) as w:
with fits.open(self.temp("test_new.fits")) as h:
assert np.all(arr == h[0].data)
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale("int16", bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp("test.fits")
hdu.data[0] = 9999
hdu.header["BLANK"] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(
fits.verify.VerifyWarning, match=r"Invalid 'BLANK' keyword in header"
):
hdul.writeto(self.temp("test2.fits"))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with fits.open(self.temp("test2.fits")) as hdul2:
assert "BLANK" not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True, mode="update") as hdul3:
data = hdul3[0].data
# This emits warning that pytest cannot catch properly, so we
# catch it with pytest.mark.filterwarnings above.
assert np.isnan(data[0])
with fits.open(filename, do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header["BLANK"] == 9999
assert hdul4[0].header["BSCALE"] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header["BZERO"] = 1.0
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data("fixed-1890.fits"))
orig_data = hdul[0].data
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data("fixed-1890.fits"))
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data("fixed-1890.fits"), do_not_scale_image_data=True)
hdul.writeto(
self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp("test_new.fits"), mode="update")
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[0].data == orig_data).all()
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file("test0.fits")
with fits.open(self.temp("test0.fits"), mode="update") as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy["NAXIS*"]
hdul[1].header = hdr_copy
with fits.open(self.temp("test0.fits")) as hdul:
assert (orig_data == hdul[1].data).all()
# The test below raised a `ResourceWarning: unclosed transport` exception
# due to a bug in Python <=3.10 (cf. cpython#90476)
@pytest.mark.filterwarnings("ignore:unclosed transport <asyncio.sslproto")
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file("scale.fits")
mtime = os.stat(self.temp("scale.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("scale.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("scale.fits")).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp("scale.fits"), "update")
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp("scale.fits")).st_mtime
hdul = fits.open(self.temp("scale.fits"), mode="update")
assert hdul[0].data.dtype == np.dtype(">f4")
assert hdul[0].header["BITPIX"] == -32
assert "BZERO" not in hdul[0].header
assert "BSCALE" not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp("scale.fits"))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype(">f4")
assert hdul[0].header["BITPIX"] == -32
assert "BZERO" not in hdul[0].header
assert "BSCALE" not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file("scale.fits")
with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul:
orig_bitpix = hdul[0].header["BITPIX"]
orig_bzero = hdul[0].header["BZERO"]
orig_bscale = hdul[0].header["BSCALE"]
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].header["BITPIX"] == orig_bitpix
assert hdul[0].header["BZERO"] == orig_bzero
assert hdul[0].header["BSCALE"] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp("scale.fits")) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("test0.fits")) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].data is None
assert h[1].header["NAXIS"] == 0
assert "NAXIS1" not in h[1].header
assert "NAXIS2" not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header["BLANK"] = "nan"
with pytest.warns(
fits.verify.VerifyWarning,
match=r"Invalid value for 'BLANK' keyword in header: 'nan'",
):
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as (
hdu,
):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r"data object array\(1\) should have at least one dimension"
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update") as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp("test.fits")) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
("data", "compression_type", "quantize_level"),
[
(np.zeros((2, 10, 10), dtype=np.float32), "RICE_1", 16),
(np.zeros((2, 10, 10), dtype=np.float32), "GZIP_1", -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), "GZIP_2", -0.01),
(np.zeros((100, 100)) + 1, "HCOMPRESS_1", 16),
(np.zeros((10, 10)), "PLIO_1", 16),
],
)
@pytest.mark.parametrize("byte_order", ["<", ">"])
def test_comp_image(self, data, compression_type, quantize_level, byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(
data,
name="SCI",
compression_type=compression_type,
quantize_level=quantize_level,
)
ofd.append(chdu)
ofd.writeto(self.temp("test_new.fits"), overwrite=True)
ofd.close()
with fits.open(self.temp("test_new.fits")) as fd:
assert (fd[1].data == data).all()
assert fd[1].header["NAXIS"] == chdu.header["NAXIS"]
assert fd[1].header["NAXIS1"] == chdu.header["NAXIS1"]
assert fd[1].header["NAXIS2"] == chdu.header["NAXIS2"]
assert fd[1].header["BITPIX"] == chdu.header["BITPIX"]
@pytest.mark.remote_data
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import pickle
np.random.seed(42)
# Basically what scipy.datasets.ascent() does.
fname = download_file(
"https://github.com/scipy/dataset-ascent/blob/main/ascent.dat?raw=true"
)
with open(fname, "rb") as f:
scipy_data = np.array(pickle.load(f))
data = scipy_data + np.random.randn(512, 512) * 10
fits.ImageHDU(data).writeto(self.temp("im1.fits"))
fits.CompImageHDU(
data,
compression_type="RICE_1",
quantize_method=1,
quantize_level=-1,
dither_seed=5,
).writeto(self.temp("im2.fits"))
fits.CompImageHDU(
data,
compression_type="RICE_1",
quantize_method=1,
quantize_level=-100,
dither_seed=5,
).writeto(self.temp("im3.fits"))
im1 = fits.getdata(self.temp("im1.fits"))
im2 = fits.getdata(self.temp("im2.fits"))
im3 = fits.getdata(self.temp("im3.fits"))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(
ValueError,
fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32),
name="SCI",
compression_type="HCOMPRESS_1",
quantize_level=16,
tile_shape=(2, 10, 10),
)
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(
data=cube,
name="SCI",
compression_type="HCOMPRESS_1",
quantize_level=16,
tile_shape=(1, 5, 5),
)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul["SCI"].data - cube).max() < 1.0 / 15.0
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
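        # DITHER_SEED_CHECKSUM asks for the dither seed to be derived from a
        # checksum of the first tile's bytes; the expression below mirrors
        # that computation so the test can predict the ZDITHER0 value that
        # ends up in the header.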
csum = (array[0].view("uint8").sum() % 10000) + 1
hdu = fits.CompImageHDU(
data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM,
)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert "ZQUANTIZ" in hdul[1]._header
assert hdul[1]._header["ZQUANTIZ"] == "SUBTRACTIVE_DITHER_1"
assert "ZDITHER0" in hdul[1]._header
assert hdul[1]._header["ZDITHER0"] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with fits.open(self.data("comp.fits"), disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data("comp.fits")) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file("comp.fits")
mtime = os.stat(self.temp("comp.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("comp.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("comp.fits")).st_mtime
@pytest.mark.slow
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("scale.fits"))
mtime = os.stat(self.temp("scale.fits")).st_mtime
time.sleep(1)
fits.open(self.temp("scale.fits"), mode="update").close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp("scale.fits")).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp("scale.fits"), "update")
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp("scale.fits")).st_mtime
hdul = fits.open(self.temp("scale.fits"), mode="update")
assert hdul[1].data.dtype == np.dtype("float32")
assert hdul[1].header["BITPIX"] == -32
assert "BZERO" not in hdul[1].header
assert "BSCALE" not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp("scale.fits"))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype("float32")
assert hdul[1].header["BITPIX"] == -32
assert "BZERO" not in hdul[1].header
assert "BSCALE" not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data("comp.fits")) as hdul:
hdul[1].writeto(self.temp("test.fits"))
with fits.open(self.data("comp.fits")) as hdul1:
with fits.open(self.temp("test.fits")) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(
hdul1[1].compressed_data, hdul2[1].compressed_data
)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(
self.data("fixed-1890.fits"), do_not_scale_image_data=True
) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("fixed-1890-z.fits"))
hdul = fits.open(self.temp("fixed-1890-z.fits"))
orig_data = hdul[1].data
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp("fixed-1890-z.fits"))
hdul.writeto(self.temp("test_new.fits"), overwrite=True)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp("fixed-1890-z.fits"), do_not_scale_image_data=True)
hdul.writeto(
self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
)
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp("test_new.fits"), mode="update")
hdul.close()
hdul = fits.open(self.temp("test_new.fits"))
assert (hdul[1].data == orig_data).all()
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header)
chdu.writeto(self.temp("scale.fits"))
with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul:
orig_bitpix = hdul[1].header["BITPIX"]
orig_bzero = hdul[1].header["BZERO"]
orig_bscale = hdul[1].header["BSCALE"]
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[1].header["BITPIX"] == orig_bitpix
assert hdul[1].header["BZERO"] == orig_bzero
assert hdul[1].header["BSCALE"] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp("scale.fits")) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data("scale.fits")) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type="GZIP_1")
# First make a test image with lossy compression and make sure it
        # wasn't compressed perfectly. This shouldn't ever happen, but just to
        # make sure the test is non-trivial.
chdu1.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(
data=noise, compression_type="GZIP_1", quantize_level=0.0
) # No quantization
chdu2.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = (np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] + np.arange(
1, 7
)
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[: data2.shape[0], : data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type="RICE_1", tile_shape=(6, 7))
chdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), disable_image_compression=True) as h:
assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM1"])
assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM2"])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file("comp.fits")
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header["test1"] = "test"
hdul[1]._header["test2"] = "test2"
with fits.open(self.temp("comp.fits")) as hdul:
assert "test1" in hdul[1].header
assert hdul[1].header["test1"] == "test"
assert "test2" in hdul[1].header
assert hdul[1].header["test2"] == "test2"
# Test update via index now:
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
hdr[hdr.index("TEST1")] = "foo"
with fits.open(self.temp("comp.fits")) as hdul:
assert hdul[1].header["TEST1"] == "foo"
# Test slice updates
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdul[1].header["TEST*"] = "qux"
with fits.open(self.temp("comp.fits")) as hdul:
assert list(hdul[1].header["TEST*"].values()) == ["qux", "qux"]
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
idx = hdr.index("TEST1")
hdr[idx : idx + 2] = "bar"
with fits.open(self.temp("comp.fits")) as hdul:
assert list(hdul[1].header["TEST*"].values()) == ["bar", "bar"]
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdul[1].header[("COMMENT", 1)] = "I am fire. I am death!"
with fits.open(self.temp("comp.fits")) as hdul:
assert hdul[1].header["COMMENT"][1] == "I am fire. I am death!"
assert hdul[1]._header["COMMENT"][1] == "I am fire. I am death!"
# Test deleting by keyword and by slice
with fits.open(self.temp("comp.fits"), mode="update") as hdul:
hdr = hdul[1].header
del hdr["COMMENT"]
idx = hdr.index("TEST1")
del hdr[idx : idx + 2]
with fits.open(self.temp("comp.fits")) as hdul:
assert "COMMENT" not in hdul[1].header
assert "COMMENT" not in hdul[1]._header
assert "TEST1" not in hdul[1].header
assert "TEST1" not in hdul[1]._header
assert "TEST2" not in hdul[1].header
assert "TEST2" not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with pytest.warns(UserWarning) as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data("comp.fits")) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, "TFIELDS", 8)
test_set_keyword(hdr, "TTYPE1", "Foo")
test_set_keyword(hdr, "ZCMPTYPE", "ASDF")
test_set_keyword(hdr, "ZVAL1", "Foo")
def test_compression_header_append(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.append("TFIELDS")
assert len(w) == 1
assert "TFIELDS" not in imghdr
imghdr.append(("FOO", "bar", "qux"), end=True)
assert "FOO" in imghdr
assert imghdr[-1] == "bar"
assert "FOO" in tblhdr
assert tblhdr[-1] == "bar"
imghdr.append(("CHECKSUM", "abcd1234"))
assert "CHECKSUM" in imghdr
assert imghdr["CHECKSUM"] == "abcd1234"
assert "CHECKSUM" not in tblhdr
assert "ZHECKSUM" in tblhdr
assert tblhdr["ZHECKSUM"] == "abcd1234"
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data("comp.fits")) as hdul:
header = hdul[1].header
while len(header) < 1000:
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.insert(1000, "TFIELDS")
assert len(w) == 1
assert "TFIELDS" not in imghdr
assert tblhdr.count("TFIELDS") == 1
# First try keyword-relative insert
imghdr.insert("TELESCOP", ("OBSERVER", "Phil Plait"))
assert "OBSERVER" in imghdr
assert imghdr.index("OBSERVER") == imghdr.index("TELESCOP") - 1
assert "OBSERVER" in tblhdr
assert tblhdr.index("OBSERVER") == tblhdr.index("TELESCOP") - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index("OBSERVER")
imghdr.insert("OBSERVER", ("FOO",))
assert "FOO" in imghdr
assert imghdr.index("FOO") == idx
assert "FOO" in tblhdr
assert tblhdr.index("FOO") == tblhdr.index("OBSERVER") - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data("comp.fits")) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w:
imghdr.set("ZBITPIX", 77, "asdf", after="XTENSION")
assert len(w) == 1
assert "ZBITPIX" not in imghdr
assert tblhdr.count("ZBITPIX") == 1
assert tblhdr["ZBITPIX"] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set("GCOUNT", 99, before="PCOUNT")
assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") - 1
assert imghdr["GCOUNT"] == 99
assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") - 1
assert tblhdr["ZGCOUNT"] == 99
assert tblhdr.index("PCOUNT") == 5
assert tblhdr.index("GCOUNT") == 6
assert tblhdr["GCOUNT"] == 1
imghdr.set("GCOUNT", 2, after="PCOUNT")
assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") + 1
assert imghdr["GCOUNT"] == 2
assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") + 1
assert tblhdr["ZGCOUNT"] == 2
assert tblhdr.index("PCOUNT") == 5
assert tblhdr.index("GCOUNT") == 6
assert tblhdr["GCOUNT"] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header["COMMENT"] = "hello world"
assert hdu.header["COMMENT"] == ["hello world"]
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["COMMENT"] == ["hello world"]
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype="float32")
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(("ZTENSION", "IMAGE"))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count("ZTENSION") == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop("ZNAXIS")
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop("ZBITPIX")
with pytest.raises(KeyError):
comp_hdu.compressed_data
def test_compressed_header_double_extname(self):
"""Test that a double EXTNAME with one default value does not
mask the non-default value."""
with fits.open(self.data("double_ext.fits")) as hdul:
hdu = hdul[1]
# Raw header has 2 EXTNAME entries
indices = hdu._header._keyword_indices["EXTNAME"]
assert len(indices) == 2
# The non-default name should be returned.
assert hdu.name == "ccd00"
assert "EXTNAME" in hdu.header
assert hdu.name == hdu.header["EXTNAME"]
            # There should be 1 non-default EXTNAME entry.
indices = hdu.header._keyword_indices["EXTNAME"]
assert len(indices) == 1
# Test header sync from property set.
new_name = "NEW_NAME"
hdu.name = new_name
assert hdu.name == new_name
assert hdu.header["EXTNAME"] == new_name
assert hdu._header["EXTNAME"] == new_name
assert hdu._image_header["EXTNAME"] == new_name
# Check that setting the header will change the name property.
hdu.header["EXTNAME"] = "NEW2"
assert hdu.name == "NEW2"
hdul.writeto(self.temp("tmp.fits"), overwrite=True)
with fits.open(self.temp("tmp.fits")) as hdul1:
hdu1 = hdul1[1]
assert len(hdu1._header._keyword_indices["EXTNAME"]) == 1
assert hdu1.name == "NEW2"
            # Check that deleting EXTNAME and then setting the name works
            # properly.
del hdu.header["EXTNAME"]
hdu.name = "RE-ADDED"
assert hdu.name == "RE-ADDED"
with pytest.raises(TypeError):
hdu.name = 42
def test_compressed_header_extname(self):
"""Test consistent EXTNAME / hdu name interaction."""
name = "FOO"
hdu = fits.CompImageHDU(data=np.arange(10), name=name)
assert hdu._header["EXTNAME"] == name
assert hdu.header["EXTNAME"] == name
assert hdu.name == name
name = "BAR"
hdu.name = name
assert hdu._header["EXTNAME"] == name
assert hdu.header["EXTNAME"] == name
assert hdu.name == name
assert len(hdu._header._keyword_indices["EXTNAME"]) == 1
def test_compressed_header_minimal(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11694
Tests that CompImageHDU can be initialized with a Header that
contains few or no cards, and doesn't require specific cards
such as 'BITPIX' or 'NAXIS'.
"""
fits.CompImageHDU(data=np.arange(10), header=fits.Header())
header = fits.Header({"HELLO": "world"})
hdu = fits.CompImageHDU(data=np.arange(10), header=header)
assert hdu.header["HELLO"] == "world"
@pytest.mark.parametrize(
("keyword", "dtype", "expected"),
[
("BSCALE", np.uint8, np.float32),
("BSCALE", np.int16, np.float32),
("BSCALE", np.int32, np.float64),
("BZERO", np.uint8, np.float32),
("BZERO", np.int16, np.float32),
("BZERO", np.int32, np.float64),
],
)
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
If BSCALE,BZERO is set to floating point values, the image
should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp("test.fits"))
del hdu
with fits.open(self.temp("test.fits")) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize(
"dtype", (np.uint8, np.int16, np.uint16, np.int32, np.uint32)
)
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid - 50, mid + 50, dtype=dtype)
testfile = self.temp("test.fits")
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
@pytest.mark.parametrize(
("dtype", "compression_type"), product(("f", "i4"), COMPRESSION_TYPES)
)
def test_write_non_contiguous_data(self, dtype, compression_type):
"""
Regression test for https://github.com/astropy/astropy/issues/2150
This used to require changing the whole array to be C-contiguous before
passing to CFITSIO, but we no longer need this - our explicit conversion
to bytes in the compression codecs returns contiguous bytes for each
tile on-the-fly.
"""
orig = np.arange(400, dtype=dtype).reshape((20, 20), order="f")[::2, ::2]
assert not orig.flags.contiguous
primary = fits.PrimaryHDU()
hdu = fits.CompImageHDU(orig, compression_type=compression_type)
hdulist = fits.HDUList([primary, hdu])
hdulist.writeto(self.temp("test.fits"))
actual = fits.getdata(self.temp("test.fits"))
assert_equal(orig, actual)
def test_slice_and_write_comp_hdu(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9955
"""
with fits.open(self.data("comp.fits")) as hdul:
hdul[1].data = hdul[1].data[:200, :100]
assert not hdul[1].data.flags.contiguous
hdul[1].writeto(self.temp("test.fits"))
with fits.open(self.data("comp.fits")) as hdul1:
with fits.open(self.temp("test.fits")) as hdul2:
assert_equal(hdul1[1].data[:200, :100], hdul2[1].data)
def test_comp_image_deprecated_tile_size(self):
# Ensure that tile_size works but is deprecated. This test
# can be removed once support for tile_size is removed.
with pytest.warns(
AstropyDeprecationWarning,
match="The tile_size argument has been deprecated",
):
chdu = fits.CompImageHDU(np.zeros((3, 4, 5)), tile_size=(5, 2, 1))
assert chdu.tile_shape == (1, 2, 5)
def test_comp_image_deprecated_tile_size_and_tile_shape(self):
# Make sure that tile_size and tile_shape are not both specified
with pytest.warns(AstropyDeprecationWarning) as w:
with pytest.raises(
ValueError, match="Cannot specify both tile_size and tile_shape."
):
fits.CompImageHDU(
np.zeros((3, 4, 5)), tile_size=(5, 2, 1), tile_shape=(3, 2, 3)
)
def test_comp_image_properties_default(self):
chdu = fits.CompImageHDU(np.zeros((3, 4, 5)))
assert chdu.tile_shape == (1, 1, 5)
assert chdu.compression_type == "RICE_1"
def test_comp_image_properties_set(self):
chdu = fits.CompImageHDU(
np.zeros((3, 4, 5)), compression_type="PLIO_1", tile_shape=(2, 3, 4)
)
assert chdu.tile_shape == (2, 3, 4)
assert chdu.compression_type == "PLIO_1"
class TestCompHDUSections:
@pytest.fixture(autouse=True)
def setup_method(self, tmp_path):
shape = (13, 17, 25)
        self.data = np.arange(np.prod(shape)).reshape(shape).astype(np.int32)
header1 = fits.Header()
hdu1 = fits.CompImageHDU(
self.data, header1, compression_type="RICE_1", tile_shape=(5, 4, 5)
)
header2 = fits.Header()
header2["BSCALE"] = 2
header2["BZERO"] = 100
hdu2 = fits.CompImageHDU(
self.data, header2, compression_type="RICE_1", tile_shape=(5, 4, 5)
)
hdulist = fits.HDUList([fits.PrimaryHDU(), hdu1, hdu2])
hdulist.writeto(tmp_path / "sections.fits")
self.hdul = fits.open(tmp_path / "sections.fits")
def teardown_method(self):
self.hdul.close()
self.hdul = None
@given(basic_indices((13, 17, 25)))
def test_section_slicing(self, index):
assert_equal(self.hdul[1].section[index], self.hdul[1].data[index])
assert_equal(self.hdul[1].section[index], self.data[index])
@given(basic_indices((13, 17, 25)))
def test_section_slicing_scaling(self, index):
assert_equal(self.hdul[2].section[index], self.hdul[2].data[index])
assert_equal(self.hdul[2].section[index], self.data[index] * 2 + 100)
def test_comphdu_fileobj():
# Regression test for a bug that caused an error to happen
# internally when reading the data if requested data shapes
# were not plain integers - this was triggered when accessing
# sections on data backed by certain kinds of objects such as
# BytesIO (but not regular file handles)
data = np.arange(6).reshape((2, 3)).astype(np.int32)
byte_buffer = BytesIO()
header = fits.Header()
hdu = fits.CompImageHDU(data, header, compression_type="RICE_1")
hdu.writeto(byte_buffer)
byte_buffer.seek(0)
hdu2 = fits.open(byte_buffer, mode="readonly")[1]
assert hdu2.section[1, 2] == 5
def test_comphdu_bscale(tmp_path):
"""
Regression test for a bug that caused extensions that used BZERO and BSCALE
that got turned into CompImageHDU to end up with BZERO/BSCALE before the
TFIELDS.
"""
filename1 = tmp_path / "3hdus.fits"
filename2 = tmp_path / "3hdus_comp.fits"
x = np.random.random((100, 100)) * 100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x - 50, dtype=int), uint=True)
x1.header["BZERO"] = 20331
x1.header["BSCALE"] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(
data=hdus[1].data.astype(np.uint32), header=hdus[1].header
)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify("exception")
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = get_pkg_data_filename("data/compressed_float_bzero.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmp_path):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmp_path / "floatimg_with_bzero.fits"
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header["BZERO"] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmp_path):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmp_path / "test.fits"
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmp_path / "test2.fits"
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
def test_int8(tmp_path):
"""Test for int8 support, https://github.com/astropy/astropy/issues/11995"""
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header["BITPIX"] == 8
assert hdul[0].header["BZERO"] == -128
assert hdul[0].header["BSCALE"] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype
|
fe7f62ebb95ab972f904b2a72a453c31b8ed289470e2cd571d52b2bf71edc960 | """
This test file uses the https://github.com/esheldon/fitsio package to verify
our compression and decompression routines against the implementation in
cfitsio.
*Note*: The fitsio library is GPL licensed, so it could be argued that this
test file is as well. Given that this test file isn't imported anywhere else
in the code, this shouldn't cause us any issues. Please bear this in mind
when editing this file.
"""
import os
import numpy as np
import pytest
from astropy.io import fits
from .conftest import _expand, fitsio_param_to_astropy_param
# This is so that tox can force this file to be run, and not be silently
# skipped on CI, but in all other test runs it's skipped if fitsio isn't present.
if "ASTROPY_ALWAYS_TEST_FITSIO" in os.environ:
import fitsio
else:
fitsio = pytest.importorskip("fitsio")
@pytest.fixture(
scope="module",
params=_expand(
[((10,),), ((5,), (1,), (3,))],
[((12, 12),), ((1, 12), (4, 5), (6, 6), None)],
[((15, 15),), ((1, 15), (5, 1), (5, 5))],
[
((15, 15, 15),),
((5, 5, 1), (5, 7, 1), (1, 5, 4), (1, 1, 15), (15, 1, 5)),
],
# Test the situation where the tile shape is passed larger than the
# array shape
[
(
(4, 4, 5),
(5, 5, 5),
),
(
(5, 5, 1),
None,
),
],
# Test shapes which caused errors
# This one we can't test here as it causes segfaults in cfitsio
# It is tested in test_roundtrip_high_D though.
# [
# ((3, 4, 5),),
# ((1, 2, 3),),
# ],
# >3D Data are not currently supported by cfitsio
),
ids=lambda x: f"shape: {x[0]} tile_dims: {x[1]}",
)
def array_shapes_tile_dims(request, compression_type):
shape, tile_dims = request.param
# H_COMPRESS needs >=2D data and always 2D tiles
if compression_type == "HCOMPRESS_1":
if (
# We don't have at least a 2D image
len(shape) < 2
or
# We don't have 2D tiles
np.count_nonzero(np.array(tile_dims) != 1) != 2
or
# TODO: The following restrictions can be lifted with some extra work.
# The tile is not the first two dimensions of the data
tile_dims[0] == 1
or tile_dims[1] == 1
or
            # The tile dimensions are not an integer multiple of the array dims
np.count_nonzero(np.array(shape[:2]) % tile_dims[:2]) != 0
):
pytest.xfail(
"HCOMPRESS requires 2D tiles, from the first two"
"dimensions, and an integer number of tiles along the first two"
"axes."
)
return shape, tile_dims
@pytest.fixture(scope="module")
def tile_dims(array_shapes_tile_dims):
return array_shapes_tile_dims[1]
@pytest.fixture(scope="module")
def data_shape(array_shapes_tile_dims):
return array_shapes_tile_dims[0]
@pytest.fixture(scope="module")
def base_original_data(data_shape, dtype, numpy_rng, compression_type):
random = numpy_rng.uniform(high=255, size=data_shape)
# Set first value to be exactly zero as zero values require special treatment
# for SUBTRACTIVE_DITHER_2
random.ravel()[0] = 0.0
# There seems to be a bug with the fitsio library where HCOMPRESS doesn't
    # work with int16 random data, so use structured (arange) test data instead.
    if (compression_type.startswith("HCOMPRESS") and "i2" in dtype) or "u1" in dtype:
        random = np.arange(np.prod(data_shape)).reshape(data_shape)
return random.astype(dtype)
@pytest.fixture(scope="module")
def fitsio_compressed_file_path(
tmp_path_factory,
comp_param_dtype,
base_original_data,
data_shape, # For debugging
tile_dims,
):
compression_type, param, dtype = comp_param_dtype
if (
base_original_data.ndim > 2
and "u1" in dtype
and compression_type == "HCOMPRESS_1"
):
pytest.xfail("fitsio won't write these")
if compression_type == "PLIO_1" and "f" in dtype:
# fitsio fails with a compression error
pytest.xfail("fitsio fails to write these")
if compression_type == "NOCOMPRESS":
pytest.xfail("fitsio does not support NOCOMPRESS")
if (
compression_type == "HCOMPRESS_1"
and "f" in dtype
and param.get("qmethod", None) == 2
):
# fitsio writes these files with very large/incorrect zzero values, whereas
# qmethod == 1 works (and the two methods should be identical except for the
# treatment of zeros)
pytest.xfail("fitsio writes these files with very large/incorrect zzero values")
tmp_path = tmp_path_factory.mktemp("fitsio")
original_data = base_original_data.astype(dtype)
filename = tmp_path / f"{compression_type}_{dtype}.fits"
fits = fitsio.FITS(filename, "rw")
fits.write(original_data, compress=compression_type, tile_dims=tile_dims, **param)
return filename
@pytest.fixture(scope="module")
def astropy_compressed_file_path(
comp_param_dtype,
tmp_path_factory,
base_original_data,
data_shape, # For debugging
tile_dims,
):
compression_type, param, dtype = comp_param_dtype
original_data = base_original_data.astype(dtype)
tmp_path = tmp_path_factory.mktemp("astropy")
filename = tmp_path / f"{compression_type}_{dtype}.fits"
param = fitsio_param_to_astropy_param(param)
hdu = fits.CompImageHDU(
data=original_data,
compression_type=compression_type,
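        # tile_shape is given to astropy in the reverse order of the tile_dims
        # passed to fitsio above, since the two libraries appear to use
        # opposite axis-ordering conventions for tile dimensions.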
tile_shape=None if tile_dims is None else tile_dims[::-1],
**param,
)
hdu.writeto(filename)
return filename
def test_decompress(
fitsio_compressed_file_path,
comp_param_dtype,
):
compression_type, param, dtype = comp_param_dtype
with fits.open(fitsio_compressed_file_path) as hdul:
data = hdul[1].data
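        # Some writers spell the compression type as e.g. RICE_ONE rather than
        # RICE_1 in ZCMPTYPE, so normalize the spelling before comparing.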
assert hdul[1]._header["ZCMPTYPE"].replace("ONE", "1") == compression_type
assert hdul[1].data.dtype.kind == np.dtype(dtype).kind
assert hdul[1].data.dtype.itemsize == np.dtype(dtype).itemsize
# The data might not always match the original data exactly in the case of
# lossy compression so instead of comparing the array read by astropy to the
# original data, we compare it to the data read in by fitsio (as those
# should match)
fts = fitsio.FITS(fitsio_compressed_file_path)
data2 = fts[1].read()
np.testing.assert_allclose(data, data2)
# The first value should be exactly equal to zero when using SUBTRACTIVE_DITHER_2
if param.get("qmethod", None) == 2:
assert data.ravel()[0] == 0.0
def test_compress(
astropy_compressed_file_path,
compression_type,
dtype,
):
if compression_type == "NOCOMPRESS":
pytest.xfail("fitsio does not support NOCOMPRESS")
fts = fitsio.FITS(astropy_compressed_file_path, "r")
header = fts[1].read_header()
data = fts[1].read()
assert header["ZCMPTYPE"] == compression_type
assert data.dtype.kind == np.dtype(dtype).kind
assert data.dtype.itemsize == np.dtype(dtype).itemsize
# The data might not always match the original data exactly in the case of
# lossy compression so instead of comparing the array read by fitsio to the
# original data, we compare it to the data read in by astropy (as those
# should match)
with fits.open(astropy_compressed_file_path) as hdul:
np.testing.assert_allclose(data, hdul[1].data)
|
bf0e5af206388b7d5e9b179a9cd4204f394d2d7e5a6fb1dfbe225dca0b1edb7b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from functools import partial
import numpy as np
from astropy import units as u
from astropy.modeling.convolution import Convolution
from astropy.modeling.core import SPECIAL_OPERATORS, CompoundModel
from astropy.nddata import support_nddata
from astropy.utils.console import human_file_size
from astropy.utils.exceptions import AstropyUserWarning
from ._convolve import _convolveNd_c
from .core import MAX_NORMALIZATION, Kernel, Kernel1D, Kernel2D
from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception
# np.unique([scipy.fft.next_fast_len(i, real=True) for i in range(10000)])
# fmt: off
_good_sizes = np.array(
[
0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40,
45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90,
96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162,
180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288,
300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480,
486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720,
729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024,
1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458,
1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025,
2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700,
2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645,
3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800,
4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144,
6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776,
8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000,
]
)
# fmt: on
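# Number of decades spanned by the table above; _next_fast_lengths scales the
# table entries by a power of ten when asked for sizes beyond its last entry.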
_good_range = int(np.log10(_good_sizes[-1]))
# Disabling doctests when scipy isn't present.
__doctest_requires__ = {("convolve_fft",): ["scipy.fft"]}
BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"]
def _next_fast_lengths(shape):
"""
Find optimal or good sizes to pad an array of ``shape`` to for better
performance with `numpy.fft.*fft` and `scipy.fft.*fft`.
Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise
looked up from list and scaled by powers of 10, if necessary.
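    For example (illustrative; the `scipy.fft` path and the lookup-table
    fallback agree for these inputs):
    >>> _next_fast_lengths((17, 90))  # doctest: +SKIP
    array([18, 90])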
"""
try:
import scipy.fft
return np.array([scipy.fft.next_fast_len(j) for j in shape])
except ImportError:
pass
newshape = np.empty(len(np.atleast_1d(shape)), dtype=int)
for i, j in enumerate(shape):
scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0)
for n in _good_sizes:
if n * scale >= j:
newshape[i] = n * scale
break
else:
raise ValueError(
f"No next fast length for {j} found in list of _good_sizes "
f"<= {_good_sizes[-1] * scale}."
)
return newshape
def _copy_input_if_needed(
input, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=None
):
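    # Return ``input`` as a plain ndarray of the requested dtype/order, copying
    # only when necessary: masked values, an explicit ``mask`` or
    # nan_treatment == 'fill' force a copy so the caller's array is never
    # modified; otherwise the input may simply be aliased.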
# Alias input
input = input.array if isinstance(input, Kernel) else input
# strip quantity attributes
if hasattr(input, "unit"):
input = input.value
output = input
# Copy input
try:
# Anything that's masked must be turned into NaNs for the interpolation.
# This requires copying. A copy is also needed for nan_treatment == 'fill'
# A copy prevents possible function side-effects of the input array.
if nan_treatment == "fill" or np.ma.is_masked(input) or mask is not None:
if np.ma.is_masked(input):
# ``np.ma.maskedarray.filled()`` returns a copy, however there
# is no way to specify the return type or order etc. In addition
# ``np.nan`` is a ``float`` and there is no conversion to an
# ``int`` type. Therefore, a pre-fill copy is needed for non
# ``float`` masked arrays. ``subok=True`` is needed to retain
# ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill
# to act as the copy if type and order are already correct.
output = np.array(
input, dtype=dtype, copy=False, order=order, subok=True
)
output = output.filled(fill_value)
else:
# Since we're making a copy, we might as well use `subok=False` to save,
# what is probably, a negligible amount of memory.
output = np.array(
input, dtype=dtype, copy=True, order=order, subok=False
)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
output[mask != 0] = fill_value
else:
            # The call below is synonymous with np.asanyarray(array, dtype=float, order='C')
# The advantage of `subok=True` is that it won't copy when array is an ndarray subclass.
# If it is and `subok=False` (default), then it will copy even if `copy=False`. This
# uses less memory when ndarray subclasses are passed in.
output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
except (TypeError, ValueError) as e:
raise TypeError(
"input should be a Numpy array or something convertible into a float array",
e,
)
return output
@support_nddata(data="array")
def convolve(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
mask=None,
preserve_nan=False,
normalization_zero_tol=1e-8,
):
"""
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
                Periodic boundary that wraps to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``.
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
Returns
-------
result : `numpy.ndarray`
        An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
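    Examples
    --------
    A minimal, illustrative call (not run as a doctest here); with the default
    ``boundary='fill'`` and ``normalize_kernel=True`` it matches the
    corresponding `convolve_fft` example:
    >>> convolve([1, 0, 3], [1, 1, 1])  # doctest: +SKIP
    array([0.33333333, 1.33333333, 1.        ])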
"""
if boundary not in BOUNDARY_OPTIONS:
raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}")
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# OpenMP support is disabled at the C src code level, changing this will have
# no effect.
n_threads = 1
# Keep refs to originals
passed_kernel = kernel
passed_array = array
# The C routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Convert kernel to ndarray if not already
# Copy or alias array to array_internal
array_internal = _copy_input_if_needed(
passed_array,
dtype=float,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
array_dtype = getattr(passed_array, "dtype", array_internal.dtype)
# Copy or alias kernel to kernel_internal
kernel_internal = _copy_input_if_needed(
passed_kernel,
dtype=float,
order="C",
nan_treatment=None,
mask=None,
fill_value=fill_value,
)
# Make sure kernel has all odd axes
if has_even_axis(kernel_internal):
raise_even_kernel_exception()
# If both image array and kernel are Kernel instances
# constrain convolution method
# This must occur before the main alias/copy of ``passed_kernel`` to
# ``kernel_internal`` as it is used for filling masked kernels.
if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
warnings.warn(
"Both array and kernel are Kernel instances, hardwiring "
"the following parameters: boundary='fill', fill_value=0,"
" normalize_Kernel=True, nan_treatment='interpolate'",
AstropyUserWarning,
)
boundary = "fill"
fill_value = 0
normalize_kernel = True
nan_treatment = "interpolate"
# -----------------------------------------------------------------------
# From this point onwards refer only to ``array_internal`` and
# ``kernel_internal``.
# Assume both are base np.ndarrays and NOT subclasses e.g. NOT
# ``Kernel`` nor ``np.ma.maskedarray`` classes.
# -----------------------------------------------------------------------
# Check dimensionality
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim > 3:
raise NotImplementedError(
"convolve only supports 1, 2, and 3-dimensional arrays at this time"
)
elif array_internal.ndim != kernel_internal.ndim:
raise Exception("array and kernel have differing number of dimensions.")
array_shape = np.array(array_internal.shape)
kernel_shape = np.array(kernel_internal.shape)
pad_width = kernel_shape // 2
# For boundary=None only the center space is convolved. All array indices within a
# distance kernel.shape//2 from the edge are completely ignored (zeroed).
# E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2
# are convolved. It is therefore not possible to use this method to convolve an
# array by a kernel that is larger (see note below) than the array - as ALL pixels
# would be ignored leaving an array of only zeros.
# Note: For even kernels the correctness condition is array_shape > kernel_shape.
# For odd kernels it is:
# array_shape >= kernel_shape OR
# array_shape > kernel_shape-1 OR
# array_shape > 2*(kernel_shape//2).
    # Since the latter is equal to the former two for odd lengths, the latter condition is
# complete.
if boundary is None and not np.all(array_shape > 2 * pad_width):
raise KernelSizeError(
"for boundary=None all kernel axes must be smaller than array's - "
"use boundary in ['fill', 'extend', 'wrap'] instead."
)
# NaN interpolation significantly slows down the C convolution
    # computation. Since nan_treatment='interpolate' is the default, check
    # whether it is even needed; if not, don't interpolate.
# NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any()
nan_interpolate = (nan_treatment == "interpolate") and np.isnan(
array_internal.sum()
)
# Check if kernel is normalizable
if normalize_kernel or nan_interpolate:
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero:
if nan_interpolate:
raise ValueError(
"Setting nan_treatment='interpolate' "
"requires the kernel to be normalized, "
"but the input kernel has a sum close "
"to zero. For a zero-sum kernel and "
"data with NaNs, set nan_treatment='fill'."
)
else:
raise ValueError(
"The kernel can't be normalized, because "
"its sum is close to zero. The sum of the "
f"given kernel is < {1.0 / MAX_NORMALIZATION}"
)
    # Mark the NaN values so we can replace them later if preserve_nan is set
    # (and fill them now if nan_treatment == 'fill')
if preserve_nan or nan_treatment == "fill":
initially_nan = np.isnan(array_internal)
if nan_treatment == "fill":
array_internal[initially_nan] = fill_value
# Avoid any memory allocation within the C code. Allocate output array
# here and pass through instead.
result = np.zeros(array_internal.shape, dtype=float, order="C")
embed_result_within_padded_region = True
array_to_convolve = array_internal
if boundary in ("fill", "extend", "wrap"):
embed_result_within_padded_region = False
if boundary == "fill":
# This method is faster than using numpy.pad(..., mode='constant')
array_to_convolve = np.full(
array_shape + 2 * pad_width,
fill_value=fill_value,
dtype=float,
order="C",
)
# Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of
# [pad_width[0]:-pad_width[0]]
# to account for when the kernel has size of 1 making pad_width = 0.
if array_internal.ndim == 1:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0]
] = array_internal
elif array_internal.ndim == 2:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
] = array_internal
else:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
pad_width[2] : array_shape[2] + pad_width[2],
] = array_internal
else:
np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"}
np_pad_mode = np_pad_mode_dict[boundary]
pad_width = kernel_shape // 2
if array_internal.ndim == 1:
np_pad_width = (pad_width[0],)
elif array_internal.ndim == 2:
np_pad_width = ((pad_width[0],), (pad_width[1],))
else:
np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],))
array_to_convolve = np.pad(
array_internal, pad_width=np_pad_width, mode=np_pad_mode
)
_convolveNd_c(
result,
array_to_convolve,
kernel_internal,
nan_interpolate,
embed_result_within_padded_region,
n_threads,
)
# So far, normalization has only occurred for nan_treatment == 'interpolate'
# because this had to happen within the C extension so as to ignore
# any NaNs
if normalize_kernel:
if not nan_interpolate:
result /= kernel_sum
elif nan_interpolate:
result *= kernel_sum
if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
warnings.warn(
"nan_treatment='interpolate', however, NaN values detected "
"post convolution. A contiguous region of NaN values, larger "
"than the kernel size, are present in the input array. "
"Increase the kernel size to avoid this.",
AstropyUserWarning,
)
if preserve_nan:
result[initially_nan] = np.nan
# Convert result to original data type
array_unit = getattr(passed_array, "unit", None)
if array_unit is not None:
result <<= array_unit
if isinstance(passed_array, Kernel):
if isinstance(passed_array, Kernel1D):
new_result = Kernel1D(array=result)
elif isinstance(passed_array, Kernel2D):
new_result = Kernel2D(array=result)
else:
raise TypeError("Only 1D and 2D Kernels are supported.")
new_result._is_bool = False
new_result._separable = passed_array._separable
if isinstance(passed_kernel, Kernel):
new_result._separable = new_result._separable and passed_kernel._separable
return new_result
elif array_dtype.kind == "f":
# Try to preserve the input type if it's a floating point type
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
@support_nddata(data="array")
def convolve_fft(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False,
mask=None,
crop=True,
return_fft=False,
fft_pad=None,
psf_pad=None,
min_wt=0.0,
allow_huge=False,
fftn=np.fft.fftn,
ifftn=np.fft.ifftn,
complex_dtype=complex,
dealias=False,
):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* It optionally pads to the nearest faster sizes to improve FFT speed.
These sizes are optimized for the numpy and scipy implementations, and
``fftconvolve`` uses them by default as well; when using other external
functions (see below), results may vary.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.org/project/pyFFTW/>`_ or
`pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
`numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also
offer somewhat better performance and a multi-threaded option.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution.
fill_value : float, optional
The value to use outside the array when using boundary='fill'.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
normalize_kernel : callable or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fft_pad : bool, optional
Default on. Zero-pad image to the nearest size supporting more efficient
execution of the FFT, generally values factorizable into the first 3-5
prime numbers. With ``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
See the examples below.
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB.
fftn : callable, optional
The fft function. Can be overridden to use your own ffts,
e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``.
ifftn : callable, optional
        The inverse fft function. Can be overridden in the same way as ``fftn``.
complex_dtype : complex type, optional
Which complex dtype to use. `numpy` has a range of options, from 64 to
256.
    dealias : bool, optional
Default off. Zero-pad image to enable explicit dealiasing
of convolution. With ``boundary='wrap'``, this will be disabled.
Note that for an input of nd dimensions this will increase
the size of the temporary arrays by at least ``1.5**nd``.
This may result in significantly more memory usage.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size.
Raises
------
`ValueError`
If the array is bigger than 1 GB after padding, will raise this
exception unless ``allow_huge`` is True.
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data
can become large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 and the update in
https://github.com/astropy/astropy/pull/11533 for further details.
Dealiasing of pseudospectral convolutions is necessary for
numerical stability of the underlying algorithms. A common
method for handling this is to zero pad the image by at least
1/2 to eliminate the wavenumbers which have been aliased
by convolution. This is so that the aliased 1/3 of the
results of the convolution computation can be thrown out. See
https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2
https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037
    Note that if dealiasing is necessary for your application, but your
    process is memory constrained, you may want to consider using
    FFTW++: https://github.com/dealias/fftwpp. It includes python
    wrappers for a pseudospectral convolution which will implicitly
    dealias your convolution without the need for additional padding.
    Note that one cannot use FFTW++'s convolution directly in this
    method, as it handles the entire convolution process internally.
Additionally, FFTW++ includes other useful pseudospectral methods to
consider.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([0.33333333, 1.33333333, 1. ])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([0.5, 2. , 1.5])
>>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP
array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00])
>>> convolve_fft([1, 2, 3], [1])
array([1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
array([1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([0.5, 2. , 1.5])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([0.5, 2. , 1.5])
>>> import scipy.fft # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn)
array([0.5, 2. , 1.5])
>>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores
>>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1)
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp)
array([0.5, 2. , 1.5])
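    The explicit dealiasing discussed in the Notes can be requested with the
    ``dealias`` option; for this small example the result should match the
    default call (shown here as an illustrative sketch):
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
    ...              dealias=True)  # doctest: +FLOAT_CMP
    array([0.5, 2. , 1.5])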
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError(
"Can't convolve two kernels with convolve_fft. Use convolve instead."
)
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Get array quantity if it exists
array_unit = getattr(array, "unit", None)
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = _copy_input_if_needed(
array,
dtype=complex,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
kernel = _copy_input_if_needed(
kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0
)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (
np.prod(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
) * u.byte
if array_size_B > 1 * u.GB and not allow_huge:
raise ValueError(
f"Size Error: Arrays will be {human_file_size(array_size_B)}. "
"Use allow_huge=True to override this exception."
)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
if nan_treatment == "fill":
array[nanmaskarray] = fill_value
else:
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1.0 / MAX_NORMALIZATION:
raise Exception(
"The kernel can't be normalized, because its sum is close to zero. The"
f" sum of the given kernel is < {1.0 / MAX_NORMALIZATION}"
)
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == "interpolate":
raise ValueError(
"Cannot interpolate NaNs with an unnormalizable kernel"
)
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn(
"The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary",
AstropyUserWarning,
)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == "fill":
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn(
f"psf_pad was set to {psf_pad}, which overrides the "
"boundary='fill' setting.",
AstropyUserWarning,
)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == "wrap":
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
if dealias:
raise ValueError("With boundary='wrap', dealias cannot be enabled.")
fill_value = 0 # force zero; it should not be used
elif boundary == "extend":
raise NotImplementedError(
"The 'extend' option is not implemented for fft-based convolution"
)
# Add shapes elementwise for psf_pad.
if psf_pad: # default=False
# add the sizes along each dimension (bigger)
newshape = np.array(arrayshape) + np.array(kernshape)
else:
# take the larger shape in each dimension (smaller)
newshape = np.maximum(arrayshape, kernshape)
if dealias:
# Extend shape by 1/2 for dealiasing
newshape += np.ceil(newshape / 2).astype(int)
# Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5).
if fft_pad: # default=True
# Get optimized sizes from scipy.
newshape = _next_fast_lengths(newshape)
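    # Illustrative example of the shape bookkeeping above: an input of shape
    # (100, 3) with a kernel of shape (6, 6) and psf_pad enabled gives
    # newshape = (106, 9); dealias (if enabled) grows each axis by ~1.5x, and
    # fft_pad then rounds each length up to the next 2/3/5-smooth size.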
# perform a second check after padding
array_size_C = (
np.prod(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
) * u.byte
if array_size_C > 1 * u.GB and not allow_huge:
raise ValueError(
f"Size Error: Arrays will be {human_file_size(array_size_C)}. "
"Use allow_huge=True to override this exception."
)
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.prod(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.prod(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.prod(arrayshape)*np.dtype(bool).itemsize
# + np.prod(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [
slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2)
]
kernslices += [
slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2)
]
arrayslices = tuple(arrayslices)
kernslices = tuple(kernslices)
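    # Worked example of the slice arithmetic above (illustrative): for
    # newdimsize = 9 and arraydimsize = 3, center = 9 - (9 + 1) // 2 = 4 and
    # the array slice is slice(3, 6), i.e. the input is placed centrally.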
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = nan_treatment == "interpolate"
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
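        # Since interpolate_nan is True in this branch, the line above simply
        # assigns 1.0 where the input was finite and 0.0 at the NaN/inf
        # pixels, i.e. it builds the interpolation weight map.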
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
fftmult *= kernel_scale
if array_unit is not None:
fftmult <<= array_unit
if return_fft:
return fftmult
if interpolate_nan:
with np.errstate(divide="ignore", invalid="ignore"):
# divide by zeros are expected here; if the weight is zero, we want
# the output to be nan or inf
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.0:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts
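    Examples
    --------
    A minimal illustrative sketch: the single NaN is replaced by the average
    of its finite neighbors, while finite values are left untouched (the
    exact values assume the default boundary handling of `convolve`).
    >>> import numpy as np
    >>> interpolate_replace_nans(np.array([1., np.nan, 3.]),
    ...                          np.array([1., 1., 1.]))  # doctest: +FLOAT_CMP
    array([1., 2., 3.])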
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(
array,
kernel,
nan_treatment="interpolate",
normalize_kernel=True,
preserve_nan=False,
**kwargs,
)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
**kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
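    Examples
    --------
    An illustrative sketch, assuming two 1D Gaussian models from
    `astropy.modeling`:
    >>> from astropy.modeling.models import Gaussian1D
    >>> model = Gaussian1D(1, 0, 0.2)
    >>> kernel = Gaussian1D(1, 0, 1)
    >>> convolved_model = convolve_models(model, kernel, mode='convolve_fft')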
"""
if mode == "convolve_fft":
operator = SPECIAL_OPERATORS.add(
"convolve_fft", partial(convolve_fft, **kwargs)
)
elif mode == "convolve":
operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs))
else:
raise ValueError(f"Mode {mode} is not supported.")
return CompoundModel(operator, model, kernel)
def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
bounding_box : tuple
The bounding box which encompasses enough of the support of both
the ``model`` and ``kernel`` so that an accurate convolution can be
computed.
resolution : float
        The resolution at which to approximate the convolution integral.
    cache : bool, optional
Default value True. Allow for the storage of the convolution
computation for later reuse.
**kwargs : dict
        Keyword arguments to be passed to `~astropy.convolution.convolve_fft`.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
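    Examples
    --------
    An illustrative sketch, assuming two 1D Gaussian models from
    `astropy.modeling` and a bounding box wide enough to cover most of
    their support:
    >>> from astropy.modeling.models import Gaussian1D
    >>> model = Gaussian1D(1, 0, 0.2)
    >>> kernel = Gaussian1D(1, 0, 1)
    >>> convolved_model = convolve_models_fft(model, kernel,
    ...                                       bounding_box=(-5, 5),
    ...                                       resolution=0.01)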
"""
operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs))
return Convolution(operator, model, kernel, bounding_box, resolution, cache)
|
f4413e9e9b7b20b1e5a8cc9137198611f58908c6ca90faa93f13b3543a18f568 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy.units.tests.test_quantity_non_ufuncs import get_wrapped_functions
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24, NUMPY_LT_1_25
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .test_masked import MaskedArraySetup, assert_masked_equal
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(
func(self.a, *args, **kwargs), mask=func(self.mask_a, *args, **kwargs)
)
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(
func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs),
)
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.0)
@pytest.mark.filterwarnings("ignore:Calling nonzero on 0d arrays is deprecated")
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.0)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.0)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize("axes", [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in axes if isinstance(axes, tuple) else (axes,):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match="not.*correct shape"):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1 + 2j, 3 + 4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize("value", [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize("value", [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
ma_list = kwargs.pop("ma_list", [self.ma, self.ma])
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
self.check(np.concatenate, dtype="f4")
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0.0, Masked(1.0, True)], [Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.0], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50.0, 25.0], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50.0, 25.0])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.0).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_max(self):
self.check(np.max, method="max")
def test_min(self):
self.check(np.min, method="min")
def test_amax(self):
self.check(np.amax, method="max")
def test_amin(self):
self.check(np.amin, method="min")
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`sometrue` is deprecated as of NumPy 1.25.0")
def test_sometrue(self):
self.check(np.sometrue, method="any")
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`alltrue` is deprecated as of NumPy 1.25.0")
def test_alltrue(self):
self.check(np.alltrue, method="all")
def test_prod(self):
self.check(np.prod)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`product` is deprecated as of NumPy 1.25.0")
def test_product(self):
self.check(np.product, method="prod")
def test_cumprod(self):
self.check(np.cumprod)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`cumproduct` is deprecated as of NumPy 1.25.0")
def test_cumproduct(self):
self.check(np.cumproduct, method="cumprod")
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round, method="round")
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`round_` is deprecated as of NumPy 1.25.0")
def test_round_(self):
self.check(np.round_, method="round")
def test_around(self):
self.check(np.around, method="round")
def test_clip(self):
self.check(np.clip, 2.0, 4.0)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.0)
expected = np.where(mask, self.a, 1000.0)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.0)
expected2 = np.where(mask, self.a, 1000.0)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match="either both or neither"):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize("default", [-1.0, np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma + 1], default=default)
expected = np.select(
[a < 1.5, a > 3.5],
[a, a + 1],
default=-1 if default is not np.ma.masked else 0,
)
expected_mask = np.select(
[a < 1.5, a > 3.5],
[mask_a, mask_a],
default=getattr(default, "mask", False),
)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.0], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0.0, 1.0], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3.0, 4.0]] * 2)
self.mask_a = np.array([[False] * 5, [True] * 4 + [False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == "b"
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1.0 + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1.0 + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1.0 + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1.0 + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)[
self.mask_a | self.mask_b
].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(), self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b).reshape(
result.shape
)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.0).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # Deliberately mask the whole lower triangle, so that some rows and
        # columns end up fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
# Check function by comparing to nan-equivalent, with masked
# values set to NaN.
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, "nan" + function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
# Also check that we can give an output MaskedArray.
if NUMPY_LT_1_25 and kwargs.get("keepdims", False):
# numpy bug gh-22714 prevents using out with keepdims=True.
# This is fixed in numpy 1.25.
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
# But that a regular array cannot be used since it has no mask.
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_median(self, axis, keepdims):
self.check(np.median, axis=axis, keepdims=keepdims)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_quantile(self, axis, keepdims):
self.check(np.quantile, q=[0.25, 0.5], axis=axis, keepdims=keepdims)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match="must be in the range"):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.0)
mask = np.concatenate(
[np.ones((2, 1), bool), self.mask_a, np.zeros((2, 1), bool)], axis=-1
)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [
(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack(
[
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2],
],
axis=-1,
),
]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1.0, 7.0).reshape(2, 3)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10.0, 3.0])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(
self.mask_a | self.mask_b, expected.shape
).copy()
# TODO: make implementation that also ensures start point mask is
# determined just by start point? (as for geomspace in numpy 1.20)?
expected_mask[-1] = self.mask_b
if function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.0)
fp = np.array([1.0, 5.0, 6.0, 19.0, 20.0])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.0])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.0])
expected = np.piecewise(self.a, condlist, [-1, 1.0])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(
self.ma,
condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.0), mask=~x.mask)],
)
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(
self.mask_a, condlist2, [True, False, lambda x: ~x]
)
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match="with 2 condition"):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True] + [False] * 5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(
np.array([1 + 2j, 0 + 4j, 3 + 0j, -1 - 1j]),
mask=[True, False, False, False],
)
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == "[— 1 2]"
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=", ")
assert out1 == "[—, 1, 2]"
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=", ", formatter={"all": hex})
assert out2 == "[———, 0x1, 0x2]"
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.ma, None, None, None, ", ", "", np._NoValue, {"int": hex}
)
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=", ", formatter={"float": hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == "MaskedNDArray([—, 1, 2])"
ma2 = self.ma.astype("f4")
out2 = np.array_repr(ma2)
assert out2 == "MaskedNDArray([——, 1., 2.], dtype=float32)"
def test_array_str(self):
out = np.array_str(self.ma)
assert out == "[— 1 2]"
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype="u1")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False] * 15 + [True, True] + [False] * 7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize("axis", [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1.0, 4.0)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10.0, 3.0, 4.0])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(["2020-12-31", "2021-01-01", "2021-01-02"], dtype="M")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([["2021-01-07"], ["2021-01-31"]], dtype="M")
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestNaNFunctions:
def setup_class(self):
self.a = np.array(
[
[np.nan, np.nan, 3.0],
[4.0, 5.0, 6.0],
]
)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True, **kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions | IGNORED_FUNCTIONS | UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (
MASKED_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(APPLY_TO_BOTH_FUNCTIONS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
|
d8710a5338dee4bdfc02317ed49d4f5d98308c941394ab47a199740aed331ec1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
needs_array_function = pytest.mark.xfail(
not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
wrapped_functions = {}
for mod in modules:
for name, f in mod.__dict__.items():
if f is np.printoptions or name.startswith("_"):
continue
if callable(f) and hasattr(f, "__wrapped__"):
wrapped_functions[name] = f
return wrapped_functions
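# Note: ``np.printoptions`` is skipped explicitly above; it carries a
# ``__wrapped__`` attribute (it is a context manager), but it is not an
# array function that can be overridden via ``__array_function__``.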
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
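# Coverage accumulates on the metaclass itself, so every test class below that
# uses CoverageMeta (directly or via BasicTestSetup) adds to the same
# ``covered`` set.  Test methods whose name does not correspond to a wrapped
# numpy function (e.g. ``test_sort_axis``) are simply not counted.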
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup_method(self):
self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1.0 * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1.0 * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1.0 * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
self.check(np.broadcast_to, (3, 3, 3), subok=True)
out = np.broadcast_to(self.q, (3, 3, 3))
assert type(out) is np.ndarray # NOT Quantity
def test_broadcast_arrays(self):
# Decided *not* to change default for subok for Quantity, since
# that would be contrary to the docstring and might break code.
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
a1, a2 = np.broadcast_arrays(self.q, q2)
assert type(a1) is np.ndarray
assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150.0, 350.0]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@needs_array_function
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@needs_array_function
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup_method(self):
self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@needs_array_function
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@needs_array_function
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.0)
def test_ones_like(self):
self.check(np.ones_like)
@needs_array_function
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
@needs_array_function
def test_diag_1d_input(self):
# Also check 1-D case; drops unit w/o __array_function__.
q = self.q.ravel()
o = np.diag(q)
expected = np.diag(q.value) << q.unit
assert o.unit == self.q.unit
assert o.shape == expected.shape
assert_array_equal(o, expected)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True], self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@needs_array_function
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
@needs_array_function
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@needs_array_function
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup_method(self):
self.q1 = np.arange(6.0).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop("q_list", [self.q1, self.q2])
q_ref = kwargs.pop("q_ref", q_list[0])
o = func(q_list, *args, **kwargs)
v_list = [q_ref._to_own_unit(q) for q in q_list]
expected = func(v_list, *args, **kwargs) * q_ref.unit
assert o.shape == expected.shape
assert np.all(o == expected)
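# ``_to_own_unit`` is a private Quantity helper that returns the value each
# item would have in ``q_ref``'s unit; plain arrays such as ``np.zeros(...)``
# are accepted here only because arrays of zeros can carry any unit.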
@needs_array_function
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
# regression test for gh-13322.
self.check(np.concatenate, dtype="f4")
self.check(
np.concatenate,
q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
q_ref=self.q1,
)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = (
np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
* self.q1.unit
)
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@needs_array_function
def test_stack(self):
self.check(np.stack)
@needs_array_function
def test_column_stack(self):
self.check(np.column_stack)
@needs_array_function
def test_hstack(self):
self.check(np.hstack)
@needs_array_function
def test_vstack(self):
self.check(np.vstack)
@needs_array_function
def test_dstack(self):
self.check(np.dstack)
@needs_array_function
def test_block(self):
self.check(np.block)
result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
@needs_array_function
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = (
np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
* self.q1.unit
)
assert np.all(out == expected)
a = np.arange(3.0)
result = np.append(a, 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@needs_array_function
def test_insert(self):
# Unit of inserted values is not ignored.
q = np.arange(12.0).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
assert np.all(out == expected)
# 0 can have any unit.
out2 = np.insert(q, (3, 5), 0)
expected2 = np.insert(q.value, (3, 5), 0) << q.unit
assert np.all(out2 == expected2)
a = np.arange(3.0)
result = np.insert(a, (2,), 50.0 * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50.0 * u.cm)
with pytest.raises(u.UnitsError):
np.insert(q, (3, 5), 0.0 * u.s)
@needs_array_function
def test_pad(self):
q = np.arange(1.0, 6.0) * u.m
out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
assert out.unit == q.unit
expected = (
np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
)
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
assert out3.unit == q.unit
expected3 = (
np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
)
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_max(self):
self.check(np.max)
def test_min(self):
self.check(np.min)
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`sometrue` is deprecated as of NumPy 1.25.0")
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`alltrue` is deprecated as of NumPy 1.25.0")
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`product` is deprecated as of NumPy 1.25.0")
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`cumproduct` is deprecated as of NumPy 1.25.0")
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round(self):
self.check(np.round)
# NUMPY_LT_1_25
@pytest.mark.filterwarnings("ignore:`round_` is deprecated as of NumPy 1.25.0")
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
def test_angle(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0.0, 10.0, 20.0]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
unit = self.q.unit
expected = (
np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
)
assert np.all(out == expected)
@needs_array_function
def test_sinc(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.0 * u.one)
@needs_array_function
def test_where(self):
out = np.where([True, False, True], self.q, 1.0 * u.km)
expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
assert np.all(out == expected)
@needs_array_function
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result has shape (2, 3, 5), with res[0, :, :] = q1, res[1, :, :] = q2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@needs_array_function
def test_select(self):
q = self.q
out = np.select(
[q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
)
expected = (
np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
* u.m
)
assert np.all(out == expected)
@needs_array_function
def test_real_if_close(self):
q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_tril(self):
self.check(np.tril)
@needs_array_function
def test_triu(self):
self.check(np.triu)
@needs_array_function
def test_unwrap(self):
q = [0.0, 3690.0, -270.0, 690.0] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1.0, 2.0] * u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.0 * u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@needs_array_function
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1.0 + 1j] * u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1.0 + 1j] * u.m)
def test_isclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 102.0, 199.0]) * u.cm
atol = 1.5 * u.cm
rtol = 1.0 * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(
q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == "b"
assert np.all(out == expected)
@needs_array_function
def test_allclose_atol_default_unit(self):
q_cm = self.q.to(u.cm)
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 198.0]) * u.cm
out = np.isclose(q1, q2, atol=0.011, rtol=0)
expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
assert np.all(out == expected)
out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit**2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@needs_array_function
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@needs_array_function
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@needs_array_function
def test_count_nonzero(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
# With an axis, the result is a plain integer ndarray; counts carry no unit.
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
atol = 2 * u.cm
rtol = 1.0 * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
@needs_array_function
def test_allclose_atol_default_unit(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
assert np.allclose(q1, q2, atol=0.011, rtol=0)
assert not np.allclose(q2, q1, atol=0.011, rtol=0)
def test_allclose_failures(self):
q1 = np.arange(3.0) * u.m
q2 = np.array([0.0, 101.0, 199.0]) * u.cm
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2 * u.s, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
@needs_array_function
def test_array_equal(self):
q1 = np.arange(3.0) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.parametrize("equal_nan", [False, True])
def test_array_equal_nan(self, equal_nan):
q1 = np.linspace(0, 1, num=11) * u.m
q1[0] = np.nan
q2 = q1.to(u.cm)
result = np.array_equal(q1, q2, equal_nan=equal_nan)
assert result == equal_nan
def test_array_equal_incompatible_units(self):
assert not np.array_equal([1, 2] * u.m, [1, 2] * u.s)
@needs_array_function
def test_array_equiv(self):
q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
def test_array_equiv_incompatible_units(self):
assert not np.array_equiv([1, 1] * u.m, [1] * u.s)
class TestNanFunctions(InvariantUnitTestSetup):
def setup_method(self):
super().setup_method()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit**2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@needs_array_function
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@needs_array_function
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@needs_array_function
def test_cross(self):
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@needs_array_function
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@needs_array_function
def test_dot(self):
q1 = np.array([1.0, 2.0, 3.0]) * u.m
q2 = np.array([4.0, 5.0, 6.0]) / u.s
o = np.dot(q1, q2)
assert o == 32.0 * u.m / u.s
@needs_array_function
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32.0 + 0j) * u.m / u.s
@needs_array_function
def test_tensordot(self):
# From the docstring example
a = np.arange(60.0).reshape(3, 4, 5) * u.m
b = np.arange(24.0).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@needs_array_function
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@needs_array_function
def test_einsum(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum("...i", q1)
assert np.all(o == q1)
o = np.einsum("ii", q1)
expected = np.einsum("ii", q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum("ij,jk", q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum("ij,jk", q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.0).reshape(3, 3) * u.m
o = np.einsum_path("...i", q1)
assert o[0] == ["einsum_path", (0,)]
o = np.einsum_path("ii", q1)
assert o[0] == ["einsum_path", (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path("ij,jk", q1, q2)
assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.0) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10.0 * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.0) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.0) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_diff_prepend_append(self):
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
assert np.all(out == expected)
x = np.arange(10.0) * u.m
out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@needs_array_function
def test_gradient_spacing(self):
# Simple gradient works out of the box.
x = np.arange(10.0) * u.m
spacing = 10.0 * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2.0 * u.s
y = [1.0, 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
def test_linspace(self):
# Note: linspace gets the unit of the end point, which is not super logical.
out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@needs_array_function
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
expected = np.logspace(10.0, 20.0, 10) * unit
assert np.all(out == expected)
out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
assert u.allclose(out, expected)
@needs_array_function
def test_geomspace(self):
out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
q2 = 10000.0 * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@needs_array_function
def test_interp(self):
x = np.array([1250.0, 2750.0]) * u.m
xp = np.arange(5.0) * u.km
yp = np.arange(5.0) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@needs_array_function
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
expected = (
np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
)
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
)
expected2 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
* u.s
)
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
out3 = np.piecewise(
x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
)
expected3 = (
np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
)
assert out3.unit == expected3.unit
assert np.all(out3 == expected3)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.0])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
@needs_array_function
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@needs_array_function
def test_digitize(self):
x = np.array([1500.0, 2500.0, 4500.0]) * u.m
bins = np.arange(10.0) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
# Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@classmethod
def _range_value(cls, range, unit):
if isinstance(range, u.Quantity):
return range.to_value(unit)
else:
return [cls._range_value(r, unit) for r in range]
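# The helper above recursively converts nested ``range`` specifications
# (lists of Quantities, or Quantity arrays) to plain values in ``unit``.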
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_range(self, range):
self.check(
np.histogram,
self.x,
range=range,
value_args=[self.x.value],
value_kwargs=dict(range=self._range_value(range, self.x.unit)),
expected_units=(None, self.x.unit),
)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@pytest.mark.parametrize("range", [[2 * u.m, 500 * u.cm], [2, 5] * u.m])
@needs_array_function
def test_histogram_bin_edges_range(self, range):
out_b = np.histogram_bin_edges(self.x, range=range)
expected_b = np.histogram_bin_edges(
self.x.value, range=self._range_value(range, self.x.unit)
)
assert np.all(out_b.value == expected_b)
@needs_array_function
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(
np.histogram2d,
x,
y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit),
)
# Check units with density.
self.check(
np.histogram2d,
x,
y,
density=True,
value_args=(x.value, y.value),
expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
)
# Check units with weights.
self.check(
np.histogram2d,
x,
y,
weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogram2d,
x,
y,
[5, inb_y],
value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, x.unit, y.unit),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogram2d,
x.value,
y.value,
bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, u.one, u.one),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogram2d_range(self, range):
self.check(
np.histogram2d,
self.x,
self.y,
range=range,
value_args=[self.x.value, self.y.value],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, self.x.unit, self.y.unit),
)
@needs_array_function
def test_histogramdd(self):
# First replicate the histogram2d tests, but using the
# histogramdd override. Normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(
np.histogramdd,
sample,
value_args=(sample_values,),
expected_units=(None, sample_units),
)
# Check units with density.
self.check(
np.histogramdd,
sample,
density=True,
value_args=(sample_values,),
expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
)
# Check units with weights.
self.check(
np.histogramdd,
sample,
weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units),
)
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.0] * u.m
self.check(
np.histogramdd,
sample,
[5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
expected_units=(None, sample_units),
)
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.0] * u.percent
self.check(
np.histogramdd,
sample_values,
bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
expected_units=(None, (u.one, u.one)),
)
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(
np.histogramdd,
xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(
np.histogramdd,
(xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,) * 3),
)
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@pytest.mark.parametrize(
argnames="range",
argvalues=[
[[2 * u.m, 500 * u.cm], [1 * u.cm, 40 * u.mm]],
[[200, 500] * u.cm, [10, 40] * u.mm],
[[200, 500], [1, 4]] * u.cm,
],
)
@needs_array_function
def test_histogramdd_range(self, range):
self.check(
np.histogramdd,
(self.x, self.y),
range=range,
value_args=[(self.x.value, self.y.value)],
value_kwargs=dict(
range=[
self._range_value(r, un)
for (r, un) in zip(range, (self.x.unit, self.y.unit))
]
),
expected_units=(None, (self.x.unit, self.y.unit)),
)
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
# Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
def test_sort_axis(self):
self.check(np.sort, axis=0)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
self.check(np.msort)
@needs_array_function
def test_sort_complex(self):
self.check(np.sort_complex)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For these, making the behaviour work means deviating only slightly from
# the docstring, and by default they fail miserably.  So we might as well.
def setup_method(self):
self.q = np.arange(3.0) * u.Jy
@needs_array_function
def test_array2string(self):
# The default formatters cannot handle units, so if we do not pass
# a relevant formatter, we are better off just treating it as an
# array (which happens for all subtypes).
out0 = np.array2string(self.q)
expected0 = str(self.q.value)
assert out0 == expected0
# Arguments are interpreted as usual.
out1 = np.array2string(self.q, separator=", ")
expected1 = "[0., 1., 2.]"
assert out1 == expected1
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
assert out2 == expected2
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
)
assert out3 == expected2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
assert out4 == expected1
@needs_array_function
def test_array_repr(self):
out = np.array_repr(self.q)
assert out == "Quantity([0., 1., 2.], unit='Jy')"
q2 = self.q.astype("f4")
out2 = np.array_repr(q2)
assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"
@needs_array_function
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
# float quantities are safe, but the integer ones are not.
def setup_method(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")
@needs_array_function
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@needs_array_function
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@needs_array_function
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@needs_array_function
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@needs_array_function
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop("unit", self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
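# Passing ``unit=None`` (as the membership tests below do) means the primary
# output is expected to be a plain array, e.g. the boolean mask from np.in1d.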
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
(
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True),
),
)
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize(
"kwargs",
(
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1),
),
)
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@needs_array_function
@pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@needs_array_function
def test_setxor1d(self):
self.check2(np.setxor1d)
@needs_array_function
def test_union1d(self):
self.check2(np.union1d)
result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
assert result.unit is u.m
assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))
@needs_array_function
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@needs_array_function
def test_in1d(self):
self.check2(np.in1d, unit=None)
# Check zero is treated as having any unit.
assert np.in1d(np.zeros(1), self.q2)
with pytest.raises(u.UnitsError):
np.in1d(np.ones(1), self.q2)
@needs_array_function
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d works always as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.0) * u.m
out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
def test_busday_count(self):
with pytest.raises(TypeError):
np.busday_count(self.q, self.q)
def test_busday_offset(self):
with pytest.raises(TypeError):
np.busday_offset(self.q, self.q)
def test_datetime_as_string(self):
with pytest.raises(TypeError):
np.datetime_as_string(self.q)
def test_is_busday(self):
with pytest.raises(TypeError):
np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
out = function(128, d=0.1 * u.s)
expected = function(128, d=0.1) / u.s
assert_array_equal(out, expected)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
def setup_method(self):
self.q = (
np.array(
[[ 1.0, -1.0, 2.0],
[ 0.0, 3.0, -1.0],
[-1.0, -1.0, 1.0]]
) << u.m
) # fmt: skip
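# This 3x3 matrix is non-singular (its determinant is 7 m**3), so the
# inverse, solve, and eigenvalue tests below are all well defined.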
def test_cond(self):
c = np.linalg.cond(self.q)
expected = np.linalg.cond(self.q.value)
assert c == expected
def test_matrix_rank(self):
r = np.linalg.matrix_rank(self.q)
x = np.linalg.matrix_rank(self.q.value)
assert r == x
@needs_array_function
def test_matrix_rank_with_tol(self):
# Use a poorly conditioned matrix, so that tol=1 and tol=0.01 differ.
q = np.arange(9.0).reshape(3, 3) / 4 * u.m
tol = 1.0 * u.cm
r2 = np.linalg.matrix_rank(q, tol)
x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
assert r2 == x2
def test_matrix_power(self):
q1 = np.linalg.matrix_power(self.q, 1)
assert_array_equal(q1, self.q)
q2 = np.linalg.matrix_power(self.q, 2)
assert_array_equal(q2, self.q @ self.q)
q2 = np.linalg.matrix_power(self.q, 4)
assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)
@needs_array_function
def test_matrix_inv_power(self):
qinv = np.linalg.inv(self.q.value) / self.q.unit
qm1 = np.linalg.matrix_power(self.q, -1)
assert_array_equal(qm1, qinv)
qm3 = np.linalg.matrix_power(self.q, -3)
assert_array_equal(qm3, qinv @ qinv @ qinv)
@needs_array_function
def test_multi_dot(self):
q2 = np.linalg.multi_dot([self.q, self.q])
q2x = self.q @ self.q
assert_array_equal(q2, q2x)
q3 = np.linalg.multi_dot([self.q, self.q, self.q])
q3x = self.q @ self.q @ self.q
assert_array_equal(q3, q3x)
@needs_array_function
def test_svd(self):
m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
svd_sx <<= m.unit
assert_array_equal(svd_u, svd_ux)
assert_array_equal(svd_vt, svd_vtx)
assert_array_equal(svd_s, svd_sx)
assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
s2 = np.linalg.svd(m, compute_uv=False)
svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
assert_array_equal(s2, svd_s2x)
@needs_array_function
def test_inv(self):
inv = np.linalg.inv(self.q)
expected = np.linalg.inv(self.q.value) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_pinv(self):
pinv = np.linalg.pinv(self.q)
expected = np.linalg.pinv(self.q.value) / self.q.unit
assert_array_equal(pinv, expected)
rcond = 0.01 * u.cm
pinv2 = np.linalg.pinv(self.q, rcond)
expected2 = (
np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
)
assert_array_equal(pinv2, expected2)
@needs_array_function
def test_tensorinv(self):
inv = np.linalg.tensorinv(self.q, ind=1)
expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
assert_array_equal(inv, expected)
@needs_array_function
def test_det(self):
det = np.linalg.det(self.q)
expected = np.linalg.det(self.q.value)
expected <<= self.q.unit ** self.q.shape[-1]
assert_array_equal(det, expected)
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[0]) # Not 2-D
with pytest.raises(np.linalg.LinAlgError):
np.linalg.det(self.q[:-1]) # Not square.
@needs_array_function
def test_slogdet(self):
# TODO: Could be supported if we had a natural logarithm unit.
with pytest.raises(TypeError):
logdet = np.linalg.slogdet(self.q)
assert hasattr(logdet, "unit")
@needs_array_function
def test_solve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.solve(self.q, b)
xx = np.linalg.solve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_tensorsolve(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x = np.linalg.tensorsolve(self.q, b)
xx = np.linalg.tensorsolve(self.q.value, b.value)
xx <<= b.unit / self.q.unit
assert_array_equal(x, xx)
assert u.allclose(self.q @ x, b)
@needs_array_function
def test_lstsq(self):
b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
xx <<= b.unit / self.q.unit
residualsx <<= b.unit**2
sx <<= self.q.unit
assert_array_equal(x, xx)
assert_array_equal(residuals, residualsx)
assert_array_equal(s, sx)
assert rank == rankx
assert u.allclose(self.q @ x, b)
# Also do one where we can check the answer...
m = np.eye(3)
b = np.arange(3) * u.m
x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
assert_array_equal(x, b)
assert np.all(residuals == 0 * u.m**2)
assert rank == 3
assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
with pytest.raises(u.UnitsError):
np.linalg.lstsq(m, b, rcond=1.0 * u.s)
@needs_array_function
def test_norm(self):
n = np.linalg.norm(self.q)
expected = np.linalg.norm(self.q.value) << self.q.unit
assert_array_equal(n, expected)
# Special case: 1-D, ord=0.
n1 = np.linalg.norm(self.q[0], ord=0)
expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
assert_array_equal(n1, expected1)
@needs_array_function
def test_cholesky(self):
# Numbers from np.linalg.cholesky docstring.
q = np.array([[1, -2j], [2j, 5]]) * u.m
cd = np.linalg.cholesky(q)
cdx = np.linalg.cholesky(q.value) << q.unit**0.5
assert_array_equal(cd, cdx)
assert u.allclose(cd @ cd.T.conj(), q)
@needs_array_function
def test_qr(self):
# This is not exhaustive...
a = np.array([[1, -2j], [2j, 5]]) * u.m
q, r = np.linalg.qr(a)
qx, rx = np.linalg.qr(a.value)
qx <<= u.one
rx <<= a.unit
assert_array_equal(q, qx)
assert_array_equal(r, rx)
assert u.allclose(q @ r, a)
@needs_array_function
def test_eig(self):
w, v = np.linalg.eig(self.q)
wx, vx = np.linalg.eig(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w, v = np.linalg.eig(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
assert_array_equal(v, np.eye(3))
@needs_array_function
def test_eigvals(self):
w = np.linalg.eigvals(self.q)
wx = np.linalg.eigvals(self.q.value) << self.q.unit
assert_array_equal(w, wx)
# Comprehensible example
q = np.diag((1, 2, 3) * u.m)
w = np.linalg.eigvals(q)
assert_array_equal(w, np.arange(1, 4) * u.m)
@needs_array_function
def test_eigh(self):
w, v = np.linalg.eigh(self.q)
wx, vx = np.linalg.eigh(self.q.value)
wx <<= self.q.unit
vx <<= u.one
assert_array_equal(w, wx)
assert_array_equal(v, vx)
@needs_array_function
def test_eigvalsh(self):
w = np.linalg.eigvalsh(self.q)
wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
@classmethod
def setup_class(self):
self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
self.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
self.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], self.pv_t_dtype
)
self.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
self.pv_t_unit = u.StructuredUnit((self.pv_unit, u.s), ("pv", "t"))
self.q_pv = self.pv << self.pv_unit
self.q_pv_t = self.pv_t << self.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
assert u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
@pytest.mark.xfail
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_nonquantities(self, flatten):
# Fails because cannot create quantity from structured array.
arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
assert not CoverageMeta.covered.intersection(untested_functions)
assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(FUNCTION_HELPERS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
@needs_array_function
def test_all_included(self):
included_in_helpers = (
SUBCLASS_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(FUNCTION_HELPERS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
# untested_functions is created using all_wrapped_functions
@needs_array_function
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
6a0283eb80e89ff3d5735eae961b9060fa73a6218bacd1a252e1809059150e8d | from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy import time
from astropy.constants import c
from astropy.coordinates import (
FK5,
GCRS,
ICRS,
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
Galactic,
SkyCoord,
SpectralQuantity,
get_body_barycentric_posvel,
)
from astropy.coordinates.spectral_coordinate import (
SpectralCoord,
_apply_relativistic_doppler_shift,
)
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose, quantity_allclose
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning, AstropyWarning
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES as FITSWCS_VELOCITY_FRAMES
def assert_frame_allclose(
frame1,
frame2,
pos_rtol=1e-7,
pos_atol=1 * u.m,
vel_rtol=1e-7,
vel_atol=1 * u.mm / u.s,
):
# checks that:
# - the positions are equal to within some tolerance (the relative tolerance
# should be dimensionless, the absolute tolerance should be a distance).
# note that these are the tolerances *in 3d*
# - either both or neither frame has velocities, or if one has no velocities
# the other one can have zero velocities
# - if velocities are present, they are equal to within some tolerance
# Ideally this should accept both frames and SkyCoords
if hasattr(frame1, "frame"): # SkyCoord-like
frame1 = frame1.frame
if hasattr(frame2, "frame"): # SkyCoord-like
frame2 = frame2.frame
# assert (frame1.data.differentials and frame2.data.differentials or
# (not frame1.data.differentials and not frame2.data.differentials))
assert frame1.is_equivalent_frame(frame2)
frame2_in_1 = frame2.transform_to(frame1)
assert_quantity_allclose(
0 * u.m, frame1.separation_3d(frame2_in_1), rtol=pos_rtol, atol=pos_atol
)
if frame1.data.differentials:
d1 = frame1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
d2 = frame2_in_1.data.represent_as(
CartesianRepresentation, CartesianDifferential
).differentials["s"]
assert_quantity_allclose(d1.norm(d1), d1.norm(d2), rtol=vel_rtol, atol=vel_atol)
@pytest.fixture(scope="module")
def greenwich_earthlocation(request):
if (
not hasattr(EarthLocation, "_site_registry")
and request.config.getoption("remote_data") == "none"
):
EarthLocation._get_site_registry(force_builtin=True)
return EarthLocation.of_site("Greenwich")
# GENERAL TESTS
# We first run through a series of cases to test different ways of initializing
# the observer and target for SpectralCoord, including for example frames,
# SkyCoords, and making sure that SpectralCoord is not sensitive to the actual
# frame or representation class.
# Local Standard of Rest
LSRD = Galactic(
u=0.1 * u.km,
v=0.1 * u.km,
w=0.1 * u.km,
U=9 * u.km / u.s,
V=12 * u.km / u.s,
W=7 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
LSRD_EQUIV = [
LSRD,
SkyCoord(LSRD), # as a SkyCoord
LSRD.transform_to(ICRS()), # different frame
LSRD.transform_to(ICRS()).transform_to(Galactic()), # different representation
]
@pytest.fixture(params=[None] + LSRD_EQUIV)
def observer(request):
return request.param
# Target located in direction of motion of LSRD with no velocities
LSRD_DIR_STATIONARY = Galactic(
u=9 * u.km, v=12 * u.km, w=7 * u.km, representation_type="cartesian"
)
LSRD_DIR_STATIONARY_EQUIV = [
LSRD_DIR_STATIONARY,
SkyCoord(LSRD_DIR_STATIONARY), # as a SkyCoord
LSRD_DIR_STATIONARY.transform_to(FK5()), # different frame
# different representation
LSRD_DIR_STATIONARY.transform_to(ICRS()).transform_to(Galactic()),
]
@pytest.fixture(params=[None] + LSRD_DIR_STATIONARY_EQUIV)
def target(request):
return request.param
def test_create_spectral_coord_observer_target(observer, target):
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
coord = SpectralCoord([100, 200, 300] * u.nm, observer=observer, target=target)
if observer is None:
assert coord.observer is None
else:
assert_frame_allclose(observer, coord.observer)
if target is None:
assert coord.target is None
else:
assert_frame_allclose(target, coord.target)
assert coord.doppler_rest is None
assert coord.doppler_convention is None
if observer is None or target is None:
assert quantity_allclose(coord.redshift, 0)
assert quantity_allclose(coord.radial_velocity, 0 * u.km / u.s)
elif any(observer is lsrd for lsrd in LSRD_EQUIV) and any(
target is lsrd for lsrd in LSRD_DIR_STATIONARY_EQUIV
):
assert_quantity_allclose(
coord.radial_velocity, -(274**0.5) * u.km / u.s, atol=1e-4 * u.km / u.s
)
assert_quantity_allclose(coord.redshift, -5.5213158163147646e-05, atol=1e-9)
else:
raise NotImplementedError()
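# Where -sqrt(274) km/s comes from (added sketch): LSRD moves with (U, V, W) =
# (9, 12, 7) km/s while the stationary target lies essentially along that same
# (9, 12, 7) direction, so the full speed sqrt(9**2 + 12**2 + 7**2) = sqrt(274)
# ~ 16.6 km/s is directed toward the target, i.e. a radial velocity of
# -sqrt(274) km/s.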
def test_create_from_spectral_coord(observer, target):
"""
Checks that parameters are correctly copied to the new SpectralCoord object
"""
with nullcontext() if target is None else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
spec_coord1 = SpectralCoord(
[100, 200, 300] * u.nm,
observer=observer,
target=target,
doppler_convention="optical",
doppler_rest=6000 * u.AA,
)
spec_coord2 = SpectralCoord(spec_coord1)
assert spec_coord1.observer == spec_coord2.observer
assert spec_coord1.target == spec_coord2.target
assert spec_coord1.radial_velocity == spec_coord2.radial_velocity
assert spec_coord1.doppler_convention == spec_coord2.doppler_convention
assert spec_coord1.doppler_rest == spec_coord2.doppler_rest
# INTERNAL FUNCTIONS TESTS
def test_apply_relativistic_doppler_shift():
# Frequency
sq1 = SpectralQuantity(1 * u.GHz)
sq2 = _apply_relativistic_doppler_shift(sq1, 0.5 * c)
assert_quantity_allclose(sq2, np.sqrt(1.0 / 3.0) * u.GHz)
# Wavelength
sq3 = SpectralQuantity(500 * u.nm)
sq4 = _apply_relativistic_doppler_shift(sq3, 0.5 * c)
assert_quantity_allclose(sq4, np.sqrt(3) * 500 * u.nm)
# Energy
sq5 = SpectralQuantity(300 * u.eV)
sq6 = _apply_relativistic_doppler_shift(sq5, 0.5 * c)
assert_quantity_allclose(sq6, np.sqrt(1.0 / 3.0) * 300 * u.eV)
# Wavenumber
sq7 = SpectralQuantity(0.01 / u.micron)
sq8 = _apply_relativistic_doppler_shift(sq7, 0.5 * c)
assert_quantity_allclose(sq8, np.sqrt(1.0 / 3.0) * 0.01 / u.micron)
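# Background for the factors above (added sketch): for a source receding at speed
# beta * c, the relativistic Doppler factor applied to frequency-like quantities is
# sqrt((1 - beta) / (1 + beta)); with beta = 0.5 that is sqrt(1/3), while wavelength
# scales by the inverse, sqrt(3).
beta = 0.5
assert np.isclose(np.sqrt((1 - beta) / (1 + beta)), np.sqrt(1.0 / 3.0))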
# Velocity (doppler_convention='relativistic')
sq9 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq10 = _apply_relativistic_doppler_shift(sq9, 300 * u.km / u.s)
assert_quantity_allclose(sq10, 499.999666 * u.km / u.s)
assert sq10.doppler_convention == "relativistic"
# Velocity (doppler_convention='radio')
sq11 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="radio", doppler_rest=1 * u.GHz
)
sq12 = _apply_relativistic_doppler_shift(sq11, 300 * u.km / u.s)
assert_quantity_allclose(sq12, 499.650008 * u.km / u.s)
assert sq12.doppler_convention == "radio"
# Velocity (doppler_convention='optical')
sq13 = SpectralQuantity(
200 * u.km / u.s, doppler_convention="optical", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 300 * u.km / u.s)
assert_quantity_allclose(sq14, 500.350493 * u.km / u.s)
assert sq14.doppler_convention == "optical"
# Velocity - check relativistic velocity addition
sq13 = SpectralQuantity(
0 * u.km / u.s, doppler_convention="relativistic", doppler_rest=1 * u.GHz
)
sq14 = _apply_relativistic_doppler_shift(sq13, 0.999 * c)
assert_quantity_allclose(sq14, 0.999 * c)
sq14 = _apply_relativistic_doppler_shift(sq14, 0.999 * c)
assert_quantity_allclose(sq14, (0.999 * 2) / (1 + 0.999**2) * c)
assert sq14.doppler_convention == "relativistic"
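# The 0.999c case above follows the relativistic velocity-addition rule (added
# sketch): combining two collinear velocities u and v gives (u + v) / (1 + u*v/c**2),
# which stays below c no matter how many shifts are applied.
u_frac = v_frac = 0.999
combined = (u_frac + v_frac) / (1 + u_frac * v_frac)
assert 0.999 < combined < 1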
# Cases that should raise errors
sq15 = SpectralQuantity(200 * u.km / u.s)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq15, 300 * u.km / u.s)
sq16 = SpectralQuantity(200 * u.km / u.s, doppler_rest=10 * u.GHz)
with pytest.raises(ValueError, match="doppler_convention not set"):
_apply_relativistic_doppler_shift(sq16, 300 * u.km / u.s)
sq17 = SpectralQuantity(200 * u.km / u.s, doppler_convention="optical")
with pytest.raises(ValueError, match="doppler_rest not set"):
_apply_relativistic_doppler_shift(sq17, 300 * u.km / u.s)
# BASIC TESTS
def test_init_quantity():
sc = SpectralCoord(10 * u.GHz)
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention is None
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_spectral_quantity():
sc = SpectralCoord(SpectralQuantity(10 * u.GHz, doppler_convention="optical"))
assert sc.value == 10.0
assert sc.unit is u.GHz
assert sc.doppler_convention == "optical"
assert sc.doppler_rest is None
assert sc.observer is None
assert sc.target is None
def test_init_too_many_args():
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz,
observer=LSRD,
target=SkyCoord(10, 20, unit="deg"),
radial_velocity=1 * u.km / u.s,
)
with pytest.raises(
ValueError, match="Cannot specify radial velocity or redshift if both"
):
SpectralCoord(
10 * u.GHz, observer=LSRD, target=SkyCoord(10, 20, unit="deg"), redshift=1
)
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.km / u.s, redshift=1)
def test_init_wrong_type():
with pytest.raises(
TypeError, match="observer must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, observer=3.4)
with pytest.raises(
TypeError, match="target must be a SkyCoord or coordinate frame instance"
):
SpectralCoord(10 * u.GHz, target=3.4)
with pytest.raises(
u.UnitsError,
match=(
"Argument 'radial_velocity' to function "
"'__new__' must be in units convertible to 'km / s'"
),
):
SpectralCoord(10 * u.GHz, radial_velocity=1 * u.kg)
with pytest.raises(
TypeError,
match=(
"Argument 'radial_velocity' to function '__new__' has no 'unit' attribute."
" You should pass in an astropy Quantity instead."
),
):
SpectralCoord(10 * u.GHz, radial_velocity="banana")
with pytest.raises(u.UnitsError, match="redshift should be dimensionless"):
SpectralCoord(10 * u.GHz, redshift=1 * u.m)
with pytest.raises(
TypeError,
match='Cannot parse "banana" as a Quantity. It does not start with a number.',
):
SpectralCoord(10 * u.GHz, redshift="banana")
def test_observer_init_rv_behavior():
"""
Test basic initialization behavior of observer/target and redshift/rv
"""
# Start off by specifying the radial velocity only
sc_init = SpectralCoord([4000, 5000] * u.AA, radial_velocity=100 * u.km / u.s)
assert sc_init.observer is None
assert sc_init.target is None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Next, set the observer, and check that the radial velocity hasn't changed
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init.observer = ICRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
assert sc_init.observer is not None
assert_quantity_allclose(sc_init.radial_velocity, 100 * u.km / u.s)
# Setting the target should now cause the original radial velocity to be
# dropped in favor of the automatically computed one
sc_init.target = SkyCoord(
CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]),
frame="icrs",
radial_velocity=30 * u.km / u.s,
)
assert sc_init.target is not None
assert_quantity_allclose(sc_init.radial_velocity, 30 * u.km / u.s)
# The observer can only be set if originally None - now that it isn't
# setting it again should fail
with pytest.raises(ValueError, match="observer has already been set"):
sc_init.observer = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
# And similarly, changing the target should not be possible
with pytest.raises(ValueError, match="target has already been set"):
sc_init.target = GCRS(CartesianRepresentation([0 * u.km, 1 * u.km, 0 * u.km]))
def test_rv_redshift_initialization():
# Check that setting the redshift sets the radial velocity appropriately,
# and that the redshift can be recovered
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=1)
assert isinstance(sc_init.redshift, u.Quantity)
assert_quantity_allclose(sc_init.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init.radial_velocity, 0.6 * c)
# Check that setting the same radial velocity produces the same redshift
# and that the radial velocity can be recovered
sc_init2 = SpectralCoord([4000, 5000] * u.AA, radial_velocity=0.6 * c)
assert_quantity_allclose(sc_init2.redshift, 1 * u.dimensionless_unscaled)
assert_quantity_allclose(sc_init2.radial_velocity, 0.6 * c)
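# Relation used in the two checks above (added sketch): for a purely radial velocity,
# 1 + z = sqrt((1 + beta) / (1 - beta)) with beta = v / c, so z = 1 corresponds to
# beta = ((1 + z)**2 - 1) / ((1 + z)**2 + 1) = 3 / 5, i.e. v = 0.6 c.
z_check = 1.0
beta_check = ((1 + z_check) ** 2 - 1) / ((1 + z_check) ** 2 + 1)
assert np.isclose(beta_check, 0.6)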
# Check that specifying redshift as a quantity works
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1 * u.one)
assert sc_init.redshift == sc_init3.redshift
# Make sure that both redshift and radial velocity can't be specified at
# the same time.
with pytest.raises(
ValueError, match="Cannot set both a radial velocity and redshift"
):
SpectralCoord([4000, 5000] * u.AA, radial_velocity=10 * u.km / u.s, redshift=2)
def test_replicate():
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_set_rv = sc_init.replicate(redshift=1)
assert_quantity_allclose(sc_set_rv.radial_velocity, 0.6 * c)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
sc_set_rv = sc_init.replicate(radial_velocity=c / 2)
assert_quantity_allclose(sc_set_rv.redshift, np.sqrt(3) - 1)
assert_quantity_allclose(sc_init, [4000, 5000] * u.AA)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init2 = SpectralCoord([4000, 5000] * u.AA, redshift=1, observer=gcrs_origin)
with np.errstate(all="ignore"):
sc_init2.replicate(redshift=0.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init3 = SpectralCoord([4000, 5000] * u.AA, redshift=1, target=gcrs_origin)
with np.errstate(all="ignore"):
sc_init3.replicate(redshift=0.5)
assert_quantity_allclose(sc_init2, [4000, 5000] * u.AA)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_init4 = SpectralCoord(
[4000, 5000] * u.AA, observer=gcrs_origin, target=gcrs_origin
)
with pytest.raises(
ValueError,
match=(
"Cannot specify radial velocity or redshift if both target and observer are"
" specified"
),
):
sc_init4.replicate(redshift=0.5)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_copy = sc_init.replicate(copy=True)
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_copy, [4000, 5000] * u.AA)
sc_init = SpectralCoord([4000, 5000] * u.AA, redshift=2)
sc_init_ref = sc_init.replicate()
sc_init[0] = 6000 * u.AA
assert_quantity_allclose(sc_init_ref, [6000, 5000] * u.AA)
def test_with_observer_stationary_relative_to():
# Simple tests of with_observer_stationary_relative_to to cover different
# ways of calling it
# The replicate method makes a new object with attributes updated, but doesn't
# do any conversion
sc1 = SpectralCoord([4000, 5000] * u.AA)
with pytest.raises(
ValueError,
match=(
"This method can only be used if both observer and target are defined on"
" the SpectralCoord"
),
):
sc1.with_observer_stationary_relative_to("icrs")
sc2 = SpectralCoord(
[4000, 5000] * u.AA,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-1 * u.km / u.s,
0 * u.km / u.s,
-1 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
0 * u.deg, 45 * u.deg, distance=1 * u.kpc, radial_velocity=2 * u.km / u.s
),
)
# Motion of observer is in opposite direction to target
assert_quantity_allclose(sc2.radial_velocity, (2 + 2**0.5) * u.km / u.s)
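# Where (2 + sqrt(2)) km/s comes from (added sketch): the target sits at ra = 0,
# dec = 45 deg, so the line of sight in ICRS Cartesian coordinates is along
# (cos 45, 0, sin 45); the observer velocity (-1, 0, -1) km/s projects onto that
# direction as -sqrt(2) km/s (moving away from the target), which adds to the
# target's own radial velocity of 2 km/s.
los_dir = np.array([2**0.5 / 2, 0.0, 2**0.5 / 2])
v_obs = np.array([-1.0, 0.0, -1.0])
assert np.isclose(-(v_obs @ los_dir), 2**0.5)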
# Change to observer that is stationary in ICRS
sc3 = sc2.with_observer_stationary_relative_to("icrs")
# Velocity difference is now pure radial velocity of target
assert_quantity_allclose(sc3.radial_velocity, 2 * u.km / u.s)
# Check setting the velocity in with_observer_stationary_relative_to
sc4 = sc2.with_observer_stationary_relative_to(
"icrs", velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
# Observer once again moving away from target but faster
assert_quantity_allclose(sc4.radial_velocity, 4 * u.km / u.s)
# Check that we can also pass frame classes instead of names
sc5 = sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc5.radial_velocity, 4 * u.km / u.s)
# And make sure we can also pass instances of classes without data
sc6 = sc2.with_observer_stationary_relative_to(
ICRS(), velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s
)
assert_quantity_allclose(sc6.radial_velocity, 4 * u.km / u.s)
# And with data provided no velocities are present
sc7 = sc2.with_observer_stationary_relative_to(
ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian"),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc7.radial_velocity, 4 * u.km / u.s)
# And also have the ability to pass frames with velocities already defined
sc8 = sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
assert_quantity_allclose(
sc8.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Make sure that things work properly if passing a SkyCoord
sc9 = sc2.with_observer_stationary_relative_to(
SkyCoord(ICRS(0 * u.km, 0 * u.km, 0 * u.km, representation_type="cartesian")),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
assert_quantity_allclose(sc9.radial_velocity, 4 * u.km / u.s)
sc10 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
)
)
assert_quantity_allclose(
sc10.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# But we shouldn't be able to pass both a frame with velocities, and explicit velocities
with pytest.raises(
ValueError,
match="frame already has differentials, cannot also specify velocity",
):
sc2.with_observer_stationary_relative_to(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
),
velocity=[-(2**0.5), 0, -(2**0.5)] * u.km / u.s,
)
# And velocities should have three elements
with pytest.raises(
ValueError, match="velocity should be a Quantity vector with 3 elements"
):
sc2.with_observer_stationary_relative_to(
ICRS, velocity=[-(2**0.5), 0, -(2**0.5), -3] * u.km / u.s
)
# Make sure things don't change depending on what frame class is used for reference
sc11 = sc2.with_observer_stationary_relative_to(
SkyCoord(
ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
2**0.5 * u.km / u.s,
0 * u.km / u.s,
2**0.5 * u.km / u.s,
representation_type="cartesian",
differential_type="cartesian",
)
).transform_to(Galactic)
)
assert_quantity_allclose(
sc11.radial_velocity, 0 * u.km / u.s, atol=1e-10 * u.km / u.s
)
# Check that it is possible to preserve the observer frame
sc12 = sc2.with_observer_stationary_relative_to(LSRD)
sc13 = sc2.with_observer_stationary_relative_to(LSRD, preserve_observer_frame=True)
assert isinstance(sc12.observer, Galactic)
assert isinstance(sc13.observer, ICRS)
def test_los_shift_radial_velocity():
# Tests to make sure that with_radial_velocity_shift correctly calculates
# the new radial velocity
# First check case where observer and/or target aren't specified
sc1 = SpectralCoord(500 * u.nm, radial_velocity=1 * u.km / u.s)
sc2 = sc1.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc2.radial_velocity, 2 * u.km / u.s)
sc3 = sc1.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc3.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc4 = SpectralCoord(
500 * u.nm, radial_velocity=1 * u.km / u.s, observer=gcrs_not_origin
)
sc5 = sc4.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc5.radial_velocity, 2 * u.km / u.s)
sc6 = sc4.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc6.radial_velocity, -2 * u.km / u.s)
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc7 = SpectralCoord(
500 * u.nm,
radial_velocity=1 * u.km / u.s,
target=ICRS(10 * u.deg, 20 * u.deg),
)
sc8 = sc7.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc8.radial_velocity, 2 * u.km / u.s)
sc9 = sc7.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc9.radial_velocity, -2 * u.km / u.s)
# Check that things still work when both observer and target are specified
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc10 = SpectralCoord(
500 * u.nm,
observer=ICRS(0 * u.deg, 0 * u.deg, distance=1 * u.m),
target=ICRS(
10 * u.deg,
20 * u.deg,
radial_velocity=1 * u.km / u.s,
distance=10 * u.kpc,
),
)
sc11 = sc10.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc11.radial_velocity, 2 * u.km / u.s)
sc12 = sc10.with_radial_velocity_shift(-3 * u.km / u.s)
assert_quantity_allclose(sc12.radial_velocity, -2 * u.km / u.s)
# Check that things work if radial_velocity wasn't specified at all
sc13 = SpectralCoord(500 * u.nm)
sc14 = sc13.with_radial_velocity_shift(1 * u.km / u.s)
assert_quantity_allclose(sc14.radial_velocity, 1 * u.km / u.s)
sc15 = sc1.with_radial_velocity_shift()
assert_quantity_allclose(sc15.radial_velocity, 1 * u.km / u.s)
# Check that units are verified
with pytest.raises(
u.UnitsError,
match=(
"Argument must have unit physical type 'speed' for radial velocty or "
"'dimensionless' for redshift."
),
):
sc1.with_radial_velocity_shift(target_shift=1 * u.kg)
@pytest.mark.xfail
def test_relativistic_radial_velocity():
# Test for when both observer and target have relativistic velocities.
# This is not yet supported, so the test is xfailed for now.
sc = SpectralCoord(
500 * u.nm,
observer=ICRS(
0 * u.km,
0 * u.km,
0 * u.km,
-0.5 * c,
-0.5 * c,
-0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
target=ICRS(
1 * u.kpc,
1 * u.kpc,
1 * u.kpc,
0.5 * c,
0.5 * c,
0.5 * c,
representation_type="cartesian",
differential_type="cartesian",
),
)
assert_quantity_allclose(sc.radial_velocity, 0.989743318610787 * u.km / u.s)
# SCIENCE USE CASE TESTS
def test_spectral_coord_jupiter(greenwich_earthlocation):
"""
Checks radial velocity between Earth and Jupiter
"""
obstime = time.Time("2018-12-13 9:00")
obs = greenwich_earthlocation.get_gcrs(obstime)
pos, vel = get_body_barycentric_posvel("jupiter", obstime)
jupiter = SkyCoord(
pos.with_differentials(CartesianDifferential(vel.xyz)), obstime=obstime
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=jupiter)
# The velocity should be less than ~43 + a bit extra, which is the
# maximum possible earth-jupiter relative velocity. We check the exact
# value here (determined from SpectralCoord, so this serves as a test to
# check that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -7.35219854 * u.km / u.s)
def test_spectral_coord_alphacen(greenwich_earthlocation):
"""
Checks radial velocity between Earth and Alpha Centauri
"""
obstime = time.Time("2018-12-13 9:00")
obs = greenwich_earthlocation.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# acen = SkyCoord.from_name('alpha cen')
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
radial_velocity=-18.0 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=acen)
# The velocity should be less than ~18 + 30 + a bit extra, which is the
# maximum possible relative velocity. We check the exact value here
# (determined from SpectralCoord, so this serves as a test to check that
# this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -26.328301 * u.km / u.s)
def test_spectral_coord_m31(greenwich_earthlocation):
"""
Checks radial velocity between Earth and M31
"""
obstime = time.Time("2018-12-13 9:00")
obs = greenwich_earthlocation.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# m31 = SkyCoord.from_name('M31')
m31 = SkyCoord(
ra=10.6847 * u.deg,
dec=41.269 * u.deg,
distance=710 * u.kpc,
radial_velocity=-300 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31)
# The velocity should be less than ~300 + 30 + a bit extra in km/s, which
# is the maximum possible relative velocity. We check the exact values
# here (determined from SpectralCoord, so this serves as a test to check
# that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s)
assert_allclose(spc.redshift, -0.0009327276702120191)
def test_shift_to_rest_galaxy():
"""
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations
"""
z = 5
rest_line_wls = [5007, 6563] * u.AA
observed_spc = SpectralCoord(rest_line_wls * (z + 1), redshift=z)
rest_spc = observed_spc.to_rest()
# alternatively:
# rest_spc = observed_spc.with_observer(observed_spec.target)
# although then it would have to be clearly documented, or the `to_rest`
# implemented in Spectrum1D?
assert_quantity_allclose(rest_spc, rest_line_wls)
# No frames are explicitly defined, so to the user, the observer and
# target are not set.
with pytest.raises(AttributeError):
assert_frame_allclose(rest_spc.observer, rest_spc.target)
def test_shift_to_rest_star_withobserver(greenwich_earthlocation):
rv = -8.3283011 * u.km / u.s
rest_line_wls = [5007, 6563] * u.AA
obstime = time.Time("2018-12-13 9:00")
eloc = greenwich_earthlocation
obs = eloc.get_gcrs(obstime)
acen = SkyCoord(
ra=219.90085 * u.deg,
dec=-60.83562 * u.deg,
frame="icrs",
distance=4.37 * u.lightyear,
)
# Note that above the rv is missing from the SkyCoord.
# That's intended, as it will instead be set in the `SpectralCoord`. But
# the SpectralCoord machinery should yield something comparable to test_
# spectral_coord_alphacen
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
observed_spc = SpectralCoord(
rest_line_wls * (rv / c + 1), observer=obs, target=acen
)
rest_spc = observed_spc.to_rest()
assert_quantity_allclose(rest_spc, rest_line_wls)
barycentric_spc = observed_spc.with_observer_stationary_relative_to("icrs")
baryrest_spc = barycentric_spc.to_rest()
assert quantity_allclose(baryrest_spc, rest_line_wls)
# now make sure the change the barycentric shift did is comparable to the
# offset rv_correction produces
# barytarg = SkyCoord(barycentric_spc.target.frame) # should be this but that doesn't work for unclear reasons
barytarg = SkyCoord(
barycentric_spc.target.data.without_differentials(),
frame=barycentric_spc.target.realize_frame(None),
)
vcorr = barytarg.radial_velocity_correction(
kind="barycentric", obstime=obstime, location=eloc
)
drv = baryrest_spc.radial_velocity - observed_spc.radial_velocity
# note this probably will not work on the first try, but it's ok if this is
# "good enough", where good enough is estimated below. But that could be
# adjusted if we think that's too aggressive of a precision target for what
# the machinery can handle
# with pytest.raises(AssertionError):
assert_quantity_allclose(vcorr, drv, atol=10 * u.m / u.s)
gcrs_origin = GCRS(CartesianRepresentation([0 * u.km, 0 * u.km, 0 * u.km]))
gcrs_not_origin = GCRS(CartesianRepresentation([1 * u.km, 0 * u.km, 0 * u.km]))
@pytest.mark.parametrize(
"sc_kwargs",
[
dict(radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(target=gcrs_origin, radial_velocity=0 * u.km / u.s),
dict(observer=gcrs_origin, target=gcrs_not_origin),
],
)
def test_los_shift(sc_kwargs):
wl = [4000, 5000] * u.AA
with nullcontext() if "observer" not in sc_kwargs and "target" not in sc_kwargs else pytest.warns(
AstropyUserWarning, match="No velocity defined on frame"
):
sc_init = SpectralCoord(wl, **sc_kwargs)
# these should always work in *all* cases because it's unambiguous that
# a target shift should behave this way
new_sc1 = sc_init.with_radial_velocity_shift(0.1)
assert_quantity_allclose(new_sc1, wl * 1.1)
# interpret at redshift
new_sc2 = sc_init.with_radial_velocity_shift(0.1 * u.dimensionless_unscaled)
assert_quantity_allclose(new_sc1, new_sc2)
new_sc3 = sc_init.with_radial_velocity_shift(-100 * u.km / u.s)
assert_quantity_allclose(new_sc3, wl * (1 + (-100 * u.km / u.s / c)))
# now try the cases where observer is specified as well/instead
if sc_init.observer is None or sc_init.target is None:
with pytest.raises(ValueError):
# both must be specified if you're going to mess with observer
sc_init.with_radial_velocity_shift(observer_shift=0.1)
if sc_init.observer is not None and sc_init.target is not None:
# redshifting the observer should *blueshift* the LOS velocity since
# it's the observer-to-target vector that matters
new_sc4 = sc_init.with_radial_velocity_shift(observer_shift=0.1)
assert_quantity_allclose(new_sc4, wl / 1.1)
# an equal shift in both should produce no offset at all
new_sc5 = sc_init.with_radial_velocity_shift(
target_shift=0.1, observer_shift=0.1
)
assert_quantity_allclose(new_sc5, wl)
def test_asteroid_velocity_frame_shifts():
"""
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations.
"""
time1 = time.Time("2018-12-13 9:00")
dt = 12 * u.hour
time2 = time1 + dt
# make the silly but simplifying assumption that the asteroid is moving along
# the x-axis of GCRS, and makes a 10 earth-radius closest approach
v_ast = [5, 0, 0] * u.km / u.s
x1 = -v_ast[0] * dt / 2
x2 = v_ast[0] * dt / 2
z = 10 * u.Rearth
cdiff = CartesianDifferential(v_ast)
asteroid_loc1 = GCRS(
CartesianRepresentation(x1.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time1,
)
asteroid_loc2 = GCRS(
CartesianRepresentation(x2.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time2,
)
# assume satellites that are essentially fixed in geostationary orbit on
# opposite sides of the earth
observer1 = GCRS(
CartesianRepresentation([0 * u.km, 35000 * u.km, 0 * u.km]), obstime=time1
)
observer2 = GCRS(
CartesianRepresentation([0 * u.km, -35000 * u.km, 0 * u.km]), obstime=time2
)
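# Sign reasoning behind the asserts below (added note): at time1 the asteroid is
# half its traverse before closest approach (x1 < 0) and moving toward +x, so the
# component of its 5 km/s motion along the observer-asteroid line points toward
# the observer (negative radial velocity); at time2 the geometry is mirrored and
# it recedes. Only the projection of the motion counts, so |v_rad| stays below
# 5 km/s in both cases.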
wls = np.linspace(4000, 7000, 100) * u.AA
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1)
assert spec_coord1.radial_velocity < 0 * u.km / u.s
assert spec_coord1.radial_velocity > -5 * u.km / u.s
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2)
assert spec_coord2.radial_velocity > 0 * u.km / u.s
assert spec_coord2.radial_velocity < 5 * u.km / u.s
# now check the behavior of with_observer_stationary_relative_to: we shift each coord
# into the velocity frame of its *own* target. That would then be a
# spectralcoord that would allow direct physical comparison of the two
# different spec_coords. There's no way to test that without
# actual data, though.
# spec_coord2 is redshifted, so we test that it behaves the way "shifting
# to rest frame" should - the as-observed spectral coordinate should become
# the rest frame, so something that starts out red should become bluer
target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target)
assert np.all(target_sc2 < spec_coord2)
# rv/redshift should be 0 since the observer and target velocities should
# be the same
assert_quantity_allclose(
target_sc2.radial_velocity, 0 * u.km / u.s, atol=1e-7 * u.km / u.s
)
# check that the same holds for spec_coord1, but be more specific: it
# should follow the standard redshift formula (which in this case yields
# a blueshift, although the formula is the same as 1+z)
target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target)
assert_quantity_allclose(target_sc1, spec_coord1 / (1 + spec_coord1.redshift))
# TODO: Figure out what is meant by the below use case
# ensure the "target-rest" use gives the same answer
# target_sc1_alt = spec_coord1.with_observer_stationary_relative_to('target-rest')
# assert_quantity_allclose(target_sc1, target_sc1_alt)
def test_spectral_coord_from_sky_coord_without_distance():
# see https://github.com/astropy/specutils/issues/658 for issue context
obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type="cartesian")
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)
# coord.target = SkyCoord.from_name('m31') # <- original issue, but below is the same but requires no remote data access
with pytest.warns(
AstropyUserWarning, match="Distance on coordinate object is dimensionless"
):
coord.target = SkyCoord(ra=10.68470833 * u.deg, dec=41.26875 * u.deg)
EXPECTED_VELOCITY_FRAMES = {
"geocent": "gcrs",
"heliocent": "hcrs",
"lsrk": "lsrk",
"lsrd": "lsrd",
"galactoc": FITSWCS_VELOCITY_FRAMES["GALACTOC"],
"localgrp": FITSWCS_VELOCITY_FRAMES["LOCALGRP"],
}
@pytest.mark.parametrize("specsys", list(EXPECTED_VELOCITY_FRAMES))
@pytest.mark.slow
def test_spectralcoord_accuracy(specsys):
# This is a test to check the numerical results of transformations between
# different velocity frames in SpectralCoord. This compares the velocity
# shifts determined with SpectralCoord to those determined from the rv
# package in Starlink.
velocity_frame = EXPECTED_VELOCITY_FRAMES[specsys]
reference_filename = get_pkg_data_filename("accuracy/data/rv.ecsv")
reference_table = Table.read(reference_filename, format="ascii.ecsv")
rest = 550 * u.nm
with iers.conf.set_temp("auto_download", False):
for row in reference_table:
observer = EarthLocation.from_geodetic(
-row["obslon"], row["obslat"]
).get_itrs(obstime=row["obstime"])
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
sc_topo = SpectralCoord(
545 * u.nm, observer=observer, target=row["target"]
)
# FIXME: A warning is emitted for dates after MJD=57754.0 even
# though the leap second table should be valid until the end of
# 2020.
with nullcontext() if row["obstime"].mjd < 57754 else pytest.warns(
AstropyWarning, match="Tried to get polar motions"
):
sc_final = sc_topo.with_observer_stationary_relative_to(velocity_frame)
delta_vel = sc_topo.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
) - sc_final.to(
u.km / u.s, doppler_convention="relativistic", doppler_rest=rest
)
if specsys == "galactoc":
assert_allclose(
delta_vel.to_value(u.km / u.s), row[specsys.lower()], atol=30
)
else:
assert_allclose(
delta_vel.to_value(u.km / u.s),
row[specsys.lower()],
atol=0.02,
rtol=0.002,
)
# TODO: add test when target is not ICRS
# TODO: add test when SpectralCoord is in velocity to start with
|
ccbfbeb3988e8efd6e6bc67fa1930d5d8b2d9180f9cbe96dcc40d7fd12914f8b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import configparser
import doctest
import os
import sys
from datetime import datetime
from importlib import metadata
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
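# For reference (illustrative example; exact formatting depends on the installed
# metadata): entries returned by metadata.requires("astropy") are PEP 508 strings
# such as 'sphinx-astropy>=1.6 ; extra == "docs"'. The environment marker after the
# ";" is stripped below before the name and specifier are parsed.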
for line in metadata.requires("astropy"):
if 'extra == "docs"' in line:
req = Requirement(line.split(";")[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
try:
version = metadata.version(req_package)
except metadata.PackageNotFoundError:
missing_requirements[req_package] = req_specifier
if version not in SpecifierSet(req_specifier, prereleases=True):
missing_requirements[req_package] = req_specifier
if missing_requirements:
print(
"The following packages could not be found and are required to "
"build the documentation:"
)
for key, val in missing_requirements.items():
print(f" * {key} {val}")
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa: E402
from sphinx_astropy.conf.v1 import ( # noqa: E402
exclude_patterns,
extensions,
intersphinx_mapping,
numpydoc_xref_aliases,
numpydoc_xref_astropy_aliases,
numpydoc_xref_ignore,
rst_epilog,
)
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {
"axes.labelsize": "large",
"figure.figsize": (6, 6),
"figure.subplot.hspace": 0.5,
"savefig.bbox": "tight",
"savefig.facecolor": "none",
}
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ["png", "svg", "pdf"]
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.0"
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping["astropy"]
# add any custom intersphinx for astropy
intersphinx_mapping.update(
{
"astropy-dev": ("https://docs.astropy.org/en/latest/", None),
"pyerfa": ("https://pyerfa.readthedocs.io/en/stable/", None),
"pytest": ("https://docs.pytest.org/en/stable/", None),
"ipython": ("https://ipython.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"sphinx_automodapi": (
"https://sphinx-automodapi.readthedocs.io/en/stable/",
None,
),
"asdf-astropy": ("https://asdf-astropy.readthedocs.io/en/latest/", None),
"fsspec": ("https://filesystem-spec.readthedocs.io/en/latest/", None),
}
)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# .inc.rst mean *include* files, don't have sphinx process them
exclude_patterns += ["_templates", "changes", "_pkgtemplate.rst", "**/*.inc.rst"]
# Add any paths that contain templates here, relative to this directory.
if "templates_path" not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append("_templates")
extensions += ["sphinx_changelog"]
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg"))
__minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "")
min_versions = {}
for line in metadata.requires("astropy"):
req = Requirement(line.split(";")[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt") as cl:
rst_epilog += cl.read().format(
minimum_python=__minimum_python_version__, **min_versions
)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT")
REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA")
FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP")
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update(
{
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments",
"Path",
# TODO! not need to ignore.
"flag",
"bits",
}
)
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update(
{
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`",
}
)
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
# Turn off table of contents entries for functions and classes
toc_object_entries = False
# -- Project information ------------------------------------------------------
project = "Astropy"
author = "The Astropy Developers"
copyright = f"2011–{datetime.utcnow().year}, " + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# Only include dev docs in dev version.
dev = "dev" in release
if not dev:
exclude_patterns += ["development/*", "testhelpers.rst"]
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ["astropy."]
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"{project} v{release}"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ["robots.txt"]
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", project + ".tex", project + " Documentation", author, "manual")
]
latex_logo = "_static/astropy_logo.pdf"
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project.lower(), project + " Documentation", [author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = "https://github.com/astropy/astropy/issues/"
edit_on_github_branch = "main"
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# See docs/nitpick-exceptions file for the actual listing.
nitpick_ignore = []
for line in open("nitpick-exceptions"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
nitpick_ignore.append((dtype, target.strip()))
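# For reference, each non-comment entry in "nitpick-exceptions" is expected to be a
# reference type and a target separated by whitespace (illustrative example):
#   py:class some_module.SomeClass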
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
"backreferences_dir": "generated/modules", # path to store the module using example template
"filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_"
"examples_dirs": f"..{os.sep}examples", # path to the examples scripts
"gallery_dirs": "generated/examples", # path to save gallery generated examples
"reference_url": {
"astropy": None,
"matplotlib": "https://matplotlib.org/stable/",
"numpy": "https://numpy.org/doc/stable/",
},
"abort_on_example_error": True,
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = [
"https://journals.aas.org/manuscript-preparation/",
"https://maia.usno.navy.mil/",
"https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer",
"https://aa.usno.navy.mil/publications/docs/Circular_179.php",
"http://data.astropy.org",
"https://doi.org/", # CI blocked by service provider
"https://ui.adsabs.harvard.edu", # CI blocked by service provider
"https://www.tandfonline.com/", # 403 Client Error: Forbidden
"https://physics.nist.gov/", # SSL: CERTIFICATE_VERIFY_FAILED
"https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst
r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+",
]
linkcheck_timeout = 180
linkcheck_anchors = False
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs."""
# Make sure we're outputting HTML
if app.builder.format != "html":
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context
)
source[0] = rendered
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
affiliate packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get("reftarget") # str or None
if str(reftarget).startswith("astropy:"):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, "astropy:"
elif dev and str(reftarget).startswith("astropy-dev:"):
process, replace = True, "astropy-dev:"
else:
process, replace = False, ""
# make link local
if process:
reftype = node.get("reftype")
refdoc = node.get("refdoc", app.env.docname)
# convert astropy intersphinx targets to local links.
# there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, "")
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node["refdomain"]]
return domain.resolve_xref(
app.env, refdoc, app.builder, reftype, reftarget, node, contnode
)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
def setup(app):
if sphinx_gallery is None:
msg = (
"The sphinx_gallery extension is not installed, so the "
"gallery will not be built. You will probably see "
"additional warnings about undefined references due "
"to this."
)
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for dealing with circular statistics, for
instance, mean, variance, standard deviation, correlation coefficient, and so
on. This module also covers tests of uniformity, e.g., the Rayleigh and V tests.
The Maximum Likelihood Estimator for the Von Mises distribution along with the
Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations
are based on reference [1]_, which is also the basis for the R package
'CircStats' [2]_.
"""
import numpy as np
from astropy.units import Quantity
__all__ = [
"circmean",
"circstd",
"circvar",
"circmoment",
"circcorrcoef",
"rayleightest",
"vtest",
"vonmisesmle",
]
__doctest_requires__ = {"vtest": ["scipy"]}
def _components(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized rectangular components
# of the circular data.
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError("Weights and data have inconsistent shape.")
C = np.sum(weights * np.cos(p * (data - phi)), axis) / np.sum(weights, axis)
S = np.sum(weights * np.sin(p * (data - phi)), axis) / np.sum(weights, axis)
return C, S
def _angle(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample mean angle
C, S = _components(data, p, phi, axis, weights)
# theta will be an angle in the interval [-np.pi, np.pi)
# [-180, 180)*u.deg in case data is a Quantity
theta = np.arctan2(S, C)
if isinstance(data, Quantity):
theta = theta.to(data.unit)
return theta
def _length(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample length
C, S = _components(data, p, phi, axis, weights)
return np.hypot(S, C)
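# A minimal sketch (hypothetical helper, not part of the public API) showing
# how the utilities above reduce a sample of angles to rectangular components
# (C, S): the mean angle is arctan2(S, C) and the resultant length is
# hypot(S, C).
def _demo_components():  # pragma: no cover - illustration only
    data = np.radians([51.0, 67.0, 40.0, 109.0, 31.0, 358.0])
    C, S = _components(data)
    theta = np.degrees(np.arctan2(S, C))  # same angle circmean() returns, ~48.6
    R = np.hypot(S, C)                    # same length used by circvar(), ~0.84
    return theta, R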
def circmean(data, axis=None, weights=None):
"""Computes the circular mean angle of an array of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : ndarray or `~astropy.units.Quantity`
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
return _angle(data, 1, 0.0, axis, weights)
def circvar(data, axis=None, weights=None):
"""Computes the circular variance of an array of circular data.
There are some concepts for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
Dimensionless, if Quantity.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
For Scipy < 1.9.0, ``scipy.stats.circvar`` uses a different
definition based on an approximation using the limit of small
angles that approaches the linear variance. For Scipy >= 1.9.0,
    ``scipy.stats.circvar`` uses a definition consistent with this
implementation.
"""
return 1.0 - _length(data, 1, 0.0, axis, weights)
def circstd(data, axis=None, weights=None, method="angular"):
"""Computes the circular standard deviation of an array of circular data.
The standard deviation implemented here is based on the definitions given
    by [1]_, which is also the same used by the R package 'CircStats' [2]_.
    Two methods are implemented: 'angular' and 'circular'. The former is
    defined as sqrt(2 * (1 - R)) and it is bounded in [0, sqrt(2)]. The
latter is defined as sqrt(-2 * ln(R)) and it is bounded in [0, inf].
Following 'CircStat' the default method used to obtain the standard
deviation is 'angular'.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
If quantity, must be dimensionless.
    axis : int, optional
        Axis along which circular standard deviations are computed. The
        default is to compute the standard deviation of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [3]_, remark 1.4, page 22,
for detailed explanation.
method : str, optional
The method used to estimate the standard deviation:
- 'angular' : obtains the angular deviation
- 'circular' : obtains the circular deviation
Returns
-------
circstd : ndarray or `~astropy.units.Quantity` ['dimensionless']
Angular or circular standard deviation.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data) # doctest: +FLOAT_CMP
<Quantity 0.57195022>
Alternatively, using the 'circular' method:
>>> import numpy as np
>>> from astropy.stats import circstd
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circstd(data, method='circular') # doctest: +FLOAT_CMP
<Quantity 0.59766999>
References
----------
.. [1] P. Berens. "CircStat: A MATLAB Toolbox for Circular Statistics".
Journal of Statistical Software, vol 31, issue 10, 2009.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
.. [3] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
"""
if method not in ("angular", "circular"):
raise ValueError("method should be either 'angular' or 'circular'")
if method == "angular":
return np.sqrt(2.0 * (1.0 - _length(data, 1, 0.0, axis, weights)))
else:
return np.sqrt(-2.0 * np.log(_length(data, 1, 0.0, axis, weights)))
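# A minimal sketch (hypothetical helper, not part of the public API): both
# circstd flavours are simple functions of the resultant length R, and for
# concentrated samples (R close to 1) they nearly coincide because
# sqrt(-2*ln(R)) ~ sqrt(2*(1 - R)) to first order in (1 - R).
def _demo_circstd_methods():  # pragma: no cover - illustration only
    data = np.radians([51.0, 67.0, 40.0, 109.0, 31.0, 358.0])
    R = _length(data)
    angular = np.sqrt(2.0 * (1.0 - R))    # what method='angular' returns, ~0.572
    circular = np.sqrt(-2.0 * np.log(R))  # what method='circular' returns, ~0.598
    return angular, circular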
def circmoment(data, p=1.0, centered=False, axis=None, weights=None):
"""Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : bool, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : ndarray or `~astropy.units.Quantity`
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if centered:
phi = circmean(data, axis, weights)
else:
phi = 0.0
return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis, weights)
def circcorrcoef(alpha, beta, axis=None, weights_alpha=None, weights_beta=None):
"""Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
        The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : ndarray or `~astropy.units.Quantity` ['dimensionless']
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if np.size(alpha, axis) != np.size(beta, axis):
raise ValueError("alpha and beta must be arrays of the same size")
mu_a = circmean(alpha, axis, weights_alpha)
mu_b = circmean(beta, axis, weights_beta)
sin_a = np.sin(alpha - mu_a)
sin_b = np.sin(beta - mu_b)
rho = np.sum(sin_a * sin_b) / np.sqrt(np.sum(sin_a * sin_a) * np.sum(sin_b * sin_b))
return rho
def rayleightest(data, axis=None, weights=None):
"""Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting a unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
    Small p-values suggest rejecting the null hypothesis.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://wexler.free.fr/library/files/wilkie%20(1983)%20rayleigh%20test%20for%20randomness%20of%20circular%20data.pdf>
"""
n = np.size(data, axis=axis)
Rbar = _length(data, 1, 0.0, axis, weights)
z = n * Rbar * Rbar
# see [3] and [4] for the formulae below
tmp = 1.0
if n < 50:
tmp = (
1.0
+ (2.0 * z - z * z) / (4.0 * n)
- (24.0 * z - 132.0 * z**2.0 + 76.0 * z**3.0 - 9.0 * z**4.0)
/ (288.0 * n * n)
)
p_value = np.exp(-z) * tmp
return p_value
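# A minimal sketch (hypothetical helper, not part of the public API): for
# n >= 50 the small-sample correction above is skipped, so the Rayleigh
# p-value reduces to exp(-z) with z = n * Rbar**2, which is easy to check
# against rayleightest() directly.
def _demo_rayleigh_large_n():  # pragma: no cover - illustration only
    rng = np.random.default_rng(0)
    data = rng.uniform(0.0, 2.0 * np.pi, size=200)  # uniform angles
    Rbar = _length(data)
    z = data.size * Rbar * Rbar
    return np.exp(-z), rayleightest(data)  # identical values for n >= 50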
def vtest(data, mu=0.0, axis=None, weights=None):
"""Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or `~astropy.units.Quantity` ['angle'], optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or `~astropy.units.Quantity` ['dimensionless']
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
"""
from scipy.stats import norm
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError("Weights and data have inconsistent shape.")
n = np.size(data, axis=axis)
R0bar = np.sum(weights * np.cos(data - mu), axis) / np.sum(weights, axis)
z = np.sqrt(2.0 * n) * R0bar
pz = norm.cdf(z)
fz = norm.pdf(z)
# see reference [3]
p_value = (
1
- pz
+ fz
* (
(3 * z - z**3) / (16.0 * n)
+ (15 * z + 305 * z**3 - 125 * z**5 + 9 * z**7) / (4608.0 * n * n)
)
)
return p_value
def _A1inv(x):
    # Approximation for _A1inv(x) according to the R package 'CircStats'
# See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4)
kappa1 = np.where(
np.logical_and(0 <= x, x < 0.53), 2.0 * x + x * x * x + (5.0 * x**5) / 6.0, 0
)
kappa2 = np.where(
np.logical_and(0.53 <= x, x < 0.85), -0.4 + 1.39 * x + 0.43 / (1.0 - x), 0
)
kappa3 = np.where(
np.logical_or(x < 0, 0.85 <= x), 1.0 / (x * x * x - 4.0 * x * x + 3.0 * x), 0
)
return kappa1 + kappa2 + kappa3
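# A minimal sketch (hypothetical helper, requires scipy, not part of the
# public API): _A1inv approximately inverts A1(kappa) = I1(kappa) / I0(kappa),
# the ratio of modified Bessel functions, so pushing its output back through
# scipy.special should roughly recover the input.
def _demo_A1inv_roundtrip(x=0.7):  # pragma: no cover - illustration only
    from scipy.special import i0, i1
    kappa = _A1inv(x)
    return i1(kappa) / i0(kappa)  # approximately 0.7 again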
def vonmisesmle(data, axis=None, weights=None):
"""Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : ndarray or `~astropy.units.Quantity`
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
        Axis along which the MLE will be computed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
mu : float or `~astropy.units.Quantity`
The mean (aka location parameter).
kappa : float or `~astropy.units.Quantity` ['dimensionless']
The concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
mu = circmean(data, axis=axis, weights=weights)
kappa = _A1inv(_length(data, p=1, phi=0.0, axis=axis, weights=weights))
return mu, kappa
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
from . import _stats
__all__ = [
"gaussian_fwhm_to_sigma",
"gaussian_sigma_to_fwhm",
"binom_conf_interval",
"binned_binom_proportion",
"poisson_conf_interval",
"median_absolute_deviation",
"mad_std",
"signal_to_noise_oir_ccd",
"bootstrap",
"kuiper",
"kuiper_two",
"kuiper_false_positive_probability",
"cdf_from_intervals",
"interval_overlap_length",
"histogram_intervals",
"fold_intervals",
]
__doctest_skip__ = ["binned_binom_proportion"]
__doctest_requires__ = {
"binom_conf_interval": ["scipy"],
"poisson_conf_interval": ["scipy"],
}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n)\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
cases. Particularly, when n is very large, and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
    latter is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
"""
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == "wilson":
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval
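# A minimal sketch (hypothetical helper, requires scipy, not part of the
# public API): the Wilson branch above can be reproduced by hand from the
# formula in the docstring, which is a convenient cross-check of the
# midpoint/half-length algebra (modulo the clipping to [0, 1]).
def _demo_wilson_by_hand(k=4, n=5, confidence_level=0.68269):  # pragma: no cover
    from scipy.special import erfinv
    kappa = np.sqrt(2.0) * erfinv(confidence_level)
    p = k / n
    midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
    halflength = (
        kappa * np.sqrt(n) / (n + kappa**2)
        * np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
    )
    # compare with binom_conf_interval(4, 5, interval='wilson')
    return np.array([midpoint - halflength, midpoint + halflength])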
def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bins_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.cfa.harvard.edu/sherpa/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <https://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
    This code is numerically intensive, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
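# A minimal sketch (hypothetical helper, requires scipy, not part of the
# public API): the 'frequentist-confidence' branch above is just the central
# chi-square interval quoted in the docstring, evaluated here directly.
def _demo_frequentist_poisson(n=7, sigma=1):  # pragma: no cover - illustration
    import scipy.stats
    alpha = scipy.stats.norm.sf(sigma)
    lower = 0.5 * scipy.stats.chi2(2 * n).ppf(alpha)
    upper = 0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)
    return lower, upper  # ~ (4.42, 10.77), matching the docstring example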
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> from numpy.random import randn
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns an masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
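# A minimal sketch (hypothetical helper, requires scipy, not part of the
# public API): the hard-coded constant used by mad_std is 1 / Phi^{-1}(3/4),
# which can be recomputed on the fly to confirm the value above.
def _demo_mad_std_constant():  # pragma: no cover - illustration only
    from scipy.stats import norm
    return 1.0 / norm.ppf(0.75)  # ~1.482602218505602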
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise
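# A minimal sketch (hypothetical numbers, not part of the public API): for a
# 100 s exposure of a 50 e-/s source in a 10-pixel aperture with 2 e-/s/pix
# sky, 0.1 e-/s/pix dark current and 5 e- read noise, the signal is 5000 e-,
# the variance is 5000 + 10 * (200 + 10) + 10 * 25 = 7350 e-^2, and the SNR
# is therefore about 58.
def _demo_ccd_snr():  # pragma: no cover - illustration only
    return signal_to_noise_oir_ccd(
        t=100.0, source_eps=50.0, sky_eps=2.0, dark_eps=0.1, rd=5.0, npix=10
    )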
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <https://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
    # result once and reuse it. The same is true for the factorial of N.
    # eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
        Kraft, Burrows and Nousek suggest integrating from N-B in both
        directions at once, so that S_min and S_max move similarly (see
        the article for details). Here, this is implemented differently:
        treat S_max as the optimization parameter in func and then
        calculate the matching s_min such that eqn7(S_max) =
        eqn7(S_min).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <https://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
    # We convert these values to float because, for some reason,
    # mpmath.mpf cannot convert from numpy.int64.
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
        Kraft, Burrows and Nousek suggest integrating from N-B in both
        directions at once, so that S_min and S_max move similarly (see
        the article for details). Here, this is implemented differently:
        treat S_max as the optimization parameter in func and then
        calculate the matching S_min such that eqn7(S_min) = eqn7(S_max).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" values is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
    # Several numerical problems were found to prevent the solvers from finding
    # the roots unless the starting values are very close to the final values.
    # Thus, we use this primitive, time-wasting, brute-force stepping here to
    # get an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
    The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
    This function has an optional dependency: either :mod:`scipy` or `mpmath
    <https://mpmath.org/>`_ needs to be available. (The scipy implementation
    only works for N up to about 100.)
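    Examples
    --------
    A minimal sketch (requires scipy or mpmath; the exact values returned
    depend on the backend's numerical tolerances)::
        S_min, S_max = _kraft_burrows_nousek(N=3, B=1.5, CL=0.95)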
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
    raise ImportError("Either scipy or mpmath is required.")
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
    Uses the set of four formulas described in Paltani 2004; they report that
    the resulting function never underestimates the false positive
    probability but can be a bit high in the N=40..50 range.
    (They quote a factor of 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
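    Examples
    --------
    A minimal sketch (both arguments are scalars; the result is the
    approximate false positive probability described above)::
        fpp = kuiper_false_positive_probability(0.5, 100)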
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (
ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)
).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
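    Examples
    --------
    A minimal sketch, testing a sample against the default uniform
    distribution on [0, 1)::
        rng = np.random.default_rng(42)
        D, fpp = kuiper(rng.uniform(size=1000))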
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N)
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
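    Examples
    --------
    A minimal sketch comparing two samples drawn from the same distribution::
        rng = np.random.default_rng(0)
        D, fpp = kuiper_two(rng.normal(size=200), rng.normal(size=300))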
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
        The ith element is the sum, over all input intervals, of the number
        of times the subinterval (breaks[i], breaks[i+1]) is covered by that
        interval, multiplied by the interval's weight.
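    Examples
    --------
    A single observing block of length 1.5 (in units of the period) covers
    the first half of the cycle twice and the second half once::
        breaks, weights = fold_intervals([(0, 1.5, 1)])
        # breaks -> [0. , 0.5, 1. ];  weights -> [2., 1.]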
"""
r = []
breaks = set()
tot = 0
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by breaks, weights
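    Examples
    --------
    Typically used on the output of `fold_intervals` (continuing the sketch
    from that function's example)::
        breaks, weights = fold_intervals([(0, 1.5, 1)])
        cdf = cdf_from_intervals(breaks, weights)
        cdf(0.5)  # -> 2/3 of the total weight lies below 0.5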
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
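    Examples
    --------
    ::
        interval_overlap_length((0, 1), (0.5, 1.5))  # -> 0.5
        interval_overlap_length((0, 1), (2, 3))      # -> 0.0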
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
        Probability densities in each interval between successive breaks
Returns
-------
h : array of float
The average weight for each bin
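    Examples
    --------
    Average the piecewise-constant weight function from `fold_intervals`
    over four equal bins::
        breaks, weights = fold_intervals([(0, 1.5, 1)])
        histogram_intervals(4, breaks, weights)  # -> [2., 2., 1., 1.]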
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h
|
718678c3a4213632c608342f7873654a5ed8330adaf7d715c8690836d4818fcf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
This module defines classes that deal with parameters.
It is unlikely users will need to work with these classes directly,
unless they define their own models.
"""
import functools
import numbers
import operator
import numpy as np
from astropy.units import MagUnit, Quantity
from astropy.utils import isiterable
from .utils import array_repr_oneline, get_inputs_and_params
__all__ = ["Parameter", "InputParameterError", "ParameterError"]
class ParameterError(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
class InputParameterError(ValueError, ParameterError):
"""Used for incorrect input parameter values and definitions."""
class ParameterDefinitionError(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array."""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
f"Parameter of {type(value)} could not be converted to float"
)
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
value = float(value)
elif isinstance(value, bool):
raise InputParameterError(
"Expected parameter to be of numerical type, not boolean"
)
else:
raise InputParameterError(
f"Don't know how to convert parameter of {type(value)} to float"
)
return value
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
class Parameter:
"""
Wraps individual parameters.
    Since 4.0, Parameters are no longer descriptors and are based on a new
    implementation of the Parameter class. Parameters now store their values
    locally (instead of in the associated model, as was previously the case).
This class represents a model's parameter (in a somewhat broad sense). It
serves a number of purposes:
1) A type to be recognized by models and treated specially at class
initialization (i.e., if it is found that there is a class definition
of a Parameter, the model initializer makes a copy at the instance level).
2) Managing the handling of allowable parameter values and once defined,
ensuring updates are consistent with the Parameter definition. This
includes the optional use of units and quantities as well as transforming
values to an internally consistent representation (e.g., from degrees to
radians through the use of getters and setters).
3) Holding attributes of parameters relevant to fitting, such as whether
the parameter may be varied in fitting, or whether there are constraints
that must be satisfied.
See :ref:`astropy:modeling-parameters` for more details.
Parameters
----------
name : str
parameter name
.. warning::
The fact that `Parameter` accepts ``name`` as an argument is an
implementation detail, and should not be used directly. When
defining a new `Model` class, parameter names are always
automatically defined by the class attribute they're assigned to.
description : str
parameter description
default : float or array
default value to use for this parameter
unit : `~astropy.units.Unit`
if specified, the parameter will be in these units, and when the
parameter is updated in future, it should be set to a
:class:`~astropy.units.Quantity` that has equivalent units.
getter : callable
a function that wraps the raw (internal) value of the parameter
        when returning the value through the parameter proxy (e.g., a
parameter may be stored internally as radians but returned to the
user as degrees). The internal value is what is used for computations
while the proxy value is what users will interact with (passing and viewing).
setter : callable
a function that wraps any values assigned to this parameter; should
be the inverse of getter
fixed : bool
if True the parameter is not varied during fitting
tied : callable or False
if callable is supplied it provides a way to link the value of this
parameter to another parameter (or some other arbitrary function)
min : float
the lower bound of a parameter
max : float
the upper bound of a parameter
bounds : tuple
specify min and max as a single tuple--bounds may not be specified
simultaneously with min or max
mag : bool
        Specify whether the unit of the parameter can be a Magnitude unit.
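    Examples
    --------
    A minimal sketch of the usual pattern: parameters are declared as class
    attributes of a model, and the attribute name becomes the parameter name::
        from astropy.modeling import Fittable1DModel, Parameter
        class Line(Fittable1DModel):
            slope = Parameter(default=1)
            intercept = Parameter(default=0)
            @staticmethod
            def evaluate(x, slope, intercept):
                return slope * x + intercept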
"""
constraints = ("fixed", "tied", "bounds")
"""
Types of constraints a parameter can have. Excludes 'min' and 'max'
which are just aliases for the first and second elements of the 'bounds'
constraint (which is represented as a 2-tuple). 'prior' and 'posterior'
are available for use by user fitters but are not used by any built-in
fitters as of this writing.
"""
def __init__(
self,
name="",
description="",
default=None,
unit=None,
getter=None,
setter=None,
fixed=False,
tied=False,
min=None,
max=None,
bounds=None,
prior=None,
posterior=None,
mag=False,
):
super().__init__()
self._model = None
self._model_required = False
self._setter = self._create_value_wrapper(setter, None)
self._getter = self._create_value_wrapper(getter, None)
self._name = name
self.__doc__ = self._description = description.strip()
# We only need to perform this check on unbound parameters
if isinstance(default, Quantity):
if unit is not None and not unit.is_equivalent(default.unit):
raise ParameterDefinitionError(
f"parameter default {default} does not have units equivalent to "
f"the required unit {unit}"
)
unit = default.unit
default = default.value
self._default = default
self._mag = mag
self._set_unit(unit, force=True)
# Internal units correspond to raw_units held by the model in the
# previous implementation. The private _getter and _setter methods
# use this to convert to and from the public unit defined for the
# parameter.
self._internal_unit = None
if not self._model_required:
if self._default is not None:
self.value = self._default
else:
self._value = None
# NOTE: These are *default* constraints--on model instances constraints
# are taken from the model if set, otherwise the defaults set here are
# used
if bounds is not None:
if min is not None or max is not None:
raise ValueError(
"bounds may not be specified simultaneously with min or "
f"max when instantiating Parameter {name}"
)
else:
bounds = (min, max)
self._fixed = fixed
self._tied = tied
self._bounds = bounds
self._order = None
self._validator = None
self._prior = prior
self._posterior = posterior
self._std = None
def __set_name__(self, owner, name):
self._name = name
def __len__(self):
val = self.value
if val.shape == ():
return 1
else:
return val.shape[0]
def __getitem__(self, key):
value = self.value
if len(value.shape) == 0:
# Wrap the value in a list so that getitem can work for sensible
# indices like [0] and [-1]
value = [value]
return value[key]
def __setitem__(self, key, value):
# Get the existing value and check whether it even makes sense to
# apply this index
oldvalue = self.value
if isinstance(key, slice):
if len(oldvalue[key]) == 0:
raise InputParameterError(
"Slice assignment outside the parameter dimensions for "
f"'{self.name}'"
)
for idx, val in zip(range(*key.indices(len(self))), value):
self.__setitem__(idx, val)
else:
try:
oldvalue[key] = value
except IndexError:
raise InputParameterError(
f"Input dimension {key} invalid for {self.name!r} parameter with "
f"dimension {value.shape[0]}"
) # likely wrong
def __repr__(self):
args = f"'{self._name}'"
args += f", value={self.value}"
if self.unit is not None:
args += f", unit={self.unit}"
for cons in self.constraints:
val = getattr(self, cons)
if val not in (None, False, (None, None)):
# Maybe non-obvious, but False is the default for the fixed and
# tied constraints
args += f", {cons}={val}"
return f"{self.__class__.__name__}({args})"
@property
def name(self):
"""Parameter name."""
return self._name
@property
def default(self):
"""Parameter default value."""
return self._default
@property
def value(self):
"""The unadorned value proxied by this parameter."""
if self._getter is None and self._setter is None:
return np.float64(self._value)
else:
            # This new implementation uses the name internal_unit in place of
            # the previously used raw_unit. The distinction is between the
            # public unit that the parameter advertises and the unit it
            # actually uses internally.
if self.internal_unit:
return np.float64(
self._getter(
self._internal_value, self.internal_unit, self.unit
).value
)
elif self._getter:
return np.float64(self._getter(self._internal_value))
elif self._setter:
return np.float64(self._internal_value)
@value.setter
def value(self, value):
if isinstance(value, Quantity):
raise TypeError(
"The .value property on parameters should be set"
" to unitless values, not Quantity objects. To set"
"a parameter to a quantity simply set the "
"parameter directly without using .value"
)
if self._setter is None:
self._value = np.array(value, dtype=np.float64)
else:
self._internal_value = np.array(self._setter(value), dtype=np.float64)
@property
def unit(self):
"""
The unit attached to this parameter, if any.
On unbound parameters (i.e. parameters accessed through the
model class, rather than a model instance) this is the required/
default unit for the parameter.
"""
return self._unit
@unit.setter
def unit(self, unit):
if self.unit is None:
raise ValueError(
"Cannot attach units to parameters that were "
"not initially specified with units"
)
else:
raise ValueError(
"Cannot change the unit attribute directly, "
"instead change the parameter to a new quantity"
)
def _set_unit(self, unit, force=False):
if force:
if isinstance(unit, MagUnit) and not self._mag:
raise ValueError(
"This parameter does not support the magnitude units such as"
f" {unit}"
)
self._unit = unit
else:
self.unit = unit
@property
def internal_unit(self):
"""
Return the internal unit the parameter uses for the internal value stored.
"""
return self._internal_unit
@internal_unit.setter
def internal_unit(self, internal_unit):
"""
        Set the unit the parameter uses to convert a supplied value to the
        representation stored internally.
"""
self._internal_unit = internal_unit
@property
def input_unit(self):
"""Unit for the input value."""
if self.internal_unit is not None:
return self.internal_unit
elif self.unit is not None:
return self.unit
else:
return None
@property
def quantity(self):
"""
This parameter, as a :class:`~astropy.units.Quantity` instance.
"""
if self.unit is None:
return None
return self.value * self.unit
@quantity.setter
def quantity(self, quantity):
if not isinstance(quantity, Quantity):
raise TypeError(
"The .quantity attribute should be set to a Quantity object"
)
self.value = quantity.value
self._set_unit(quantity.unit, force=True)
@property
def shape(self):
"""The shape of this parameter's value array."""
if self._setter is None:
return self._value.shape
return self._internal_value.shape
@shape.setter
def shape(self, value):
if isinstance(self.value, np.generic):
if value not in ((), (1,)):
raise ValueError("Cannot assign this shape to a scalar quantity")
else:
self.value.shape = value
@property
def size(self):
"""The size of this parameter's value array."""
return np.size(self.value)
@property
def std(self):
"""Standard deviation, if available from fit."""
return self._std
@std.setter
def std(self, value):
self._std = value
@property
def prior(self):
return self._prior
@prior.setter
def prior(self, val):
self._prior = val
@property
def posterior(self):
return self._posterior
@posterior.setter
def posterior(self, val):
self._posterior = val
@property
def fixed(self):
"""
Boolean indicating if the parameter is kept fixed during fitting.
"""
return self._fixed
@fixed.setter
def fixed(self, value):
"""Fix a parameter."""
if not isinstance(value, bool):
raise ValueError("Value must be boolean")
self._fixed = value
@property
def tied(self):
"""
Indicates that this parameter is linked to another one.
A callable which provides the relationship of the two parameters.
"""
return self._tied
@tied.setter
def tied(self, value):
"""Tie a parameter."""
if not callable(value) and value not in (False, None):
raise TypeError("Tied must be a callable or set to False or None")
self._tied = value
@property
def bounds(self):
"""The minimum and maximum values of a parameter as a tuple."""
return self._bounds
@bounds.setter
def bounds(self, value):
"""Set the minimum and maximum values of a parameter from a tuple."""
_min, _max = value
if _min is not None:
if not isinstance(_min, (numbers.Number, Quantity)):
raise TypeError("Min value must be a number or a Quantity")
if isinstance(_min, Quantity):
_min = float(_min.value)
else:
_min = float(_min)
if _max is not None:
if not isinstance(_max, (numbers.Number, Quantity)):
raise TypeError("Max value must be a number or a Quantity")
if isinstance(_max, Quantity):
_max = float(_max.value)
else:
_max = float(_max)
self._bounds = (_min, _max)
@property
def min(self):
"""A value used as a lower bound when fitting a parameter."""
return self.bounds[0]
@min.setter
def min(self, value):
"""Set a minimum value of a parameter."""
self.bounds = (value, self.max)
@property
def max(self):
"""A value used as an upper bound when fitting a parameter."""
return self.bounds[1]
@max.setter
def max(self, value):
"""Set a maximum value of a parameter."""
self.bounds = (self.min, value)
@property
def validator(self):
"""
Used as a decorator to set the validator method for a `Parameter`.
The validator method validates any value set for that parameter.
It takes two arguments--``self``, which refers to the `Model`
instance (remember, this is a method defined on a `Model`), and
the value being set for this parameter. The validator method's
return value is ignored, but it may raise an exception if the value
set on the parameter is invalid (typically an `InputParameterError`
should be raised, though this is not currently a requirement).
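        For example, a sketch assuming a model class with a ``width``
        parameter (the validator defined this way is attached to that
        parameter)::
            @width.validator
            def width(self, value):
                if np.any(value <= 0):
                    raise InputParameterError("width must be strictly positive")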
"""
def validator(func, self=self):
if callable(func):
self._validator = func
return self
else:
raise ValueError(
"This decorator method expects a callable.\n"
"The use of this method as a direct validator is\n"
"deprecated; use the new validate method instead\n"
)
return validator
def validate(self, value):
"""Run the validator on this parameter."""
if self._validator is not None and self._model is not None:
self._validator(self._model, value)
def copy(
self,
name=None,
description=None,
default=None,
unit=None,
getter=None,
setter=None,
fixed=False,
tied=False,
min=None,
max=None,
bounds=None,
prior=None,
posterior=None,
):
"""
Make a copy of this `Parameter`, overriding any of its core attributes
in the process (or an exact copy).
The arguments to this method are the same as those for the `Parameter`
initializer. This simply returns a new `Parameter` instance with any
or all of the attributes overridden, and so returns the equivalent of:
.. code:: python
Parameter(self.name, self.description, ...)
"""
kwargs = locals().copy()
del kwargs["self"]
for key, value in kwargs.items():
if value is None:
                # Annoying special cases for min/max, which are just aliases
                # for the components of bounds
if key in ("min", "max"):
continue
else:
if hasattr(self, key):
value = getattr(self, key)
elif hasattr(self, "_" + key):
value = getattr(self, "_" + key)
kwargs[key] = value
return self.__class__(**kwargs)
@property
def model(self):
"""Return the model this parameter is associated with."""
return self._model
@model.setter
def model(self, value):
self._model = value
self._setter = self._create_value_wrapper(self._setter, value)
self._getter = self._create_value_wrapper(self._getter, value)
if self._model_required:
if self._default is not None:
self.value = self._default
else:
self._value = None
@property
def _raw_value(self):
"""
Currently for internal use only.
Like Parameter.value but does not pass the result through
Parameter.getter. By design this should only be used from bound
parameters.
        This will probably be removed or reworked at some point in the
        process of rethinking how parameter values are stored/updated.
"""
if self._setter:
return self._internal_value
return self.value
def _create_value_wrapper(self, wrapper, model):
"""Wraps a getter/setter function to support optionally passing in
a reference to the model object as the second argument.
If a model is tied to this parameter and its getter/setter supports
a second argument then this creates a partial function using the model
instance as the second argument.
"""
if isinstance(wrapper, np.ufunc):
if wrapper.nin != 1:
raise TypeError(
"A numpy.ufunc used for Parameter "
"getter/setter may only take one input "
"argument"
)
return _wrap_ufunc(wrapper)
elif wrapper is None:
# Just allow non-wrappers to fall through silently, for convenience
return None
else:
inputs, _ = get_inputs_and_params(wrapper)
nargs = len(inputs)
if nargs == 1:
pass
elif nargs == 2:
self._model_required = True
if model is not None:
# Don't make a partial function unless we're tied to a
# specific model instance
model_arg = inputs[1].name
wrapper = functools.partial(wrapper, **{model_arg: model})
else:
raise TypeError(
"Parameter getter/setter must be a function "
"of either one or two arguments"
)
return wrapper
def __array__(self, dtype=None):
# Make np.asarray(self) work a little more straightforwardly
arr = np.asarray(self.value, dtype=dtype)
if self.unit is not None:
arr = Quantity(arr, self.unit, copy=False, subok=True)
return arr
def __bool__(self):
return bool(np.all(self.value))
__add__ = _binary_arithmetic_operation(operator.add)
__radd__ = _binary_arithmetic_operation(operator.add, reflected=True)
__sub__ = _binary_arithmetic_operation(operator.sub)
__rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True)
__mul__ = _binary_arithmetic_operation(operator.mul)
__rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True)
__pow__ = _binary_arithmetic_operation(operator.pow)
__rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True)
__truediv__ = _binary_arithmetic_operation(operator.truediv)
__rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__eq__ = _binary_comparison_operation(operator.eq)
__ne__ = _binary_comparison_operation(operator.ne)
__lt__ = _binary_comparison_operation(operator.lt)
__gt__ = _binary_comparison_operation(operator.gt)
__le__ = _binary_comparison_operation(operator.le)
__ge__ = _binary_comparison_operation(operator.ge)
__neg__ = _unary_arithmetic_operation(operator.neg)
__abs__ = _unary_arithmetic_operation(operator.abs)
def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = f"{out} {param.unit!s}"
return out
def _wrap_ufunc(ufunc):
def _wrapper(value, raw_unit=None, orig_unit=None):
"""
        Wrap ufuncs to support passing in units.
        raw_unit is the unit of the input value;
        orig_unit is the unit of the value after the ufunc has been applied;
        it is assumed that ufunc(raw_unit) == orig_unit.
"""
if orig_unit is not None:
return ufunc(value) * orig_unit
elif raw_unit is not None:
return ufunc(value * raw_unit)
return ufunc(value)
return _wrapper
|
d811a2b8680febb111a4890ecc8d9367940a1b206b49fefb517bdfd1730dc6b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains an improved bounding box.
"""
from __future__ import annotations
import abc
import copy
import warnings
from collections import namedtuple
from typing import Any, Callable
import numpy as np
from astropy.units import Quantity
from astropy.utils import isiterable
__all__ = ["ModelBoundingBox", "CompoundBoundingBox"]
_BaseInterval = namedtuple("_BaseInterval", "lower upper")
class _Interval(_BaseInterval):
"""
A single input's bounding box interval.
Parameters
----------
lower : float
The lower bound of the interval
upper : float
The upper bound of the interval
Methods
-------
validate :
Constructs a valid interval
outside :
Determine which parts of an input array are outside the interval.
domain :
Constructs a discretization of the points inside the interval.
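    Examples
    --------
    A small sketch of the helper methods::
        interval = _Interval.validate((-1, 1))
        interval.outside(np.array([-2.0, 0.0, 2.0]))  # -> [ True, False,  True]
        interval.domain(1)                            # -> [-1.,  0.,  1.]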
"""
def __repr__(self):
return f"Interval(lower={self.lower}, upper={self.upper})"
def copy(self):
return copy.deepcopy(self)
@staticmethod
def _validate_shape(interval):
"""Validate the shape of an interval representation."""
MESSAGE = """An interval must be some sort of sequence of length 2"""
try:
shape = np.shape(interval)
except TypeError:
try:
# np.shape does not work with lists of Quantities
if len(interval) == 1:
interval = interval[0]
shape = np.shape([b.to_value() for b in interval])
except (ValueError, TypeError, AttributeError):
raise ValueError(MESSAGE)
valid_shape = shape in ((2,), (1, 2), (2, 0))
if not valid_shape:
valid_shape = (
len(shape) > 0
and shape[0] == 2
and all(isinstance(b, np.ndarray) for b in interval)
)
if not isiterable(interval) or not valid_shape:
raise ValueError(MESSAGE)
@classmethod
def _validate_bounds(cls, lower, upper):
"""Validate the bounds are reasonable and construct an interval from them."""
if (np.asanyarray(lower) > np.asanyarray(upper)).all():
warnings.warn(
f"Invalid interval: upper bound {upper} "
f"is strictly less than lower bound {lower}.",
RuntimeWarning,
)
return cls(lower, upper)
@classmethod
def validate(cls, interval):
"""
Construct and validate an interval.
Parameters
----------
interval : iterable
A representation of the interval.
Returns
-------
A validated interval.
"""
cls._validate_shape(interval)
if len(interval) == 1:
interval = tuple(interval[0])
else:
interval = tuple(interval)
return cls._validate_bounds(interval[0], interval[1])
def outside(self, _input: np.ndarray):
"""
Parameters
----------
_input : np.ndarray
The evaluation input in the form of an array.
Returns
-------
Boolean array indicating which parts of _input are outside the interval:
True -> position outside interval
False -> position inside interval
"""
return np.logical_or(_input < self.lower, _input > self.upper)
def domain(self, resolution):
return np.arange(self.lower, self.upper + resolution, resolution)
# The interval where all ignored inputs can be found.
_ignored_interval = _Interval.validate((-np.inf, np.inf))
def get_index(model, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
if isinstance(key, str):
if key in model.inputs:
index = model.inputs.index(key)
else:
raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.")
elif np.issubdtype(type(key), np.integer):
if 0 <= key < len(model.inputs):
index = key
else:
raise IndexError(
f"Integer key: {key} must be non-negative and < {len(model.inputs)}."
)
else:
raise ValueError(f"Key value: {key} must be string or integer.")
return index
def get_name(model, index: int):
"""Get the input name corresponding to the input index."""
return model.inputs[index]
class _BoundingDomain(abc.ABC):
"""
Base class for ModelBoundingBox and CompoundBoundingBox.
This is where all the `~astropy.modeling.core.Model` evaluation
code for evaluating with a bounding box is because it is common
to both types of bounding box.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this bounding domain is for.
prepare_inputs :
Generates the necessary input information so that model can
be evaluated only for input points entirely inside bounding_box.
This needs to be implemented by a subclass. Note that most of
the implementation is in ModelBoundingBox.
prepare_outputs :
Fills the output values in for any input points outside the
bounding_box.
evaluate :
Performs a complete model evaluation while enforcing the bounds
on the inputs and returns a complete output.
"""
def __init__(self, model, ignored: list[int] = None, order: str = "C"):
self._model = model
self._ignored = self._validate_ignored(ignored)
self._order = self._get_order(order)
@property
def model(self):
return self._model
@property
def order(self) -> str:
return self._order
@property
def ignored(self) -> list[int]:
return self._ignored
def _get_order(self, order: str = None) -> str:
"""
        Determine whether the bounding_box is C/Python ordered or
        Fortran/mathematically ordered.
"""
if order is None:
order = self._order
if order not in ("C", "F"):
raise ValueError(
"order must be either 'C' (C/python order) or "
f"'F' (Fortran/mathematical order), got: {order}."
)
return order
def _get_index(self, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
return get_index(self._model, key)
def _get_name(self, index: int):
"""Get the input name corresponding to the input index."""
return get_name(self._model, index)
@property
def ignored_inputs(self) -> list[str]:
return [self._get_name(index) for index in self._ignored]
def _validate_ignored(self, ignored: list) -> list[int]:
if ignored is None:
return []
else:
return [self._get_index(key) for key in ignored]
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters."
)
@abc.abstractmethod
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
raise NotImplementedError("This should be implemented by a child class.")
@abc.abstractmethod
def prepare_inputs(self, input_shape, inputs) -> tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
raise NotImplementedError("This has not been implemented for BoundingDomain.")
@staticmethod
def _base_output(input_shape, fill_value):
"""
Create a baseline output, assuming that the entire input is outside
the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An array of the correct shape containing all fill_value
"""
return np.zeros(input_shape) + fill_value
def _all_out_output(self, input_shape, fill_value):
"""
Create output if all inputs are outside the domain.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
A full set of outputs for case that all inputs are outside domain.
"""
return [
self._base_output(input_shape, fill_value)
for _ in range(self._model.n_outputs)
], None
def _modify_output(self, valid_output, valid_index, input_shape, fill_value):
"""
For a single output fill in all the parts corresponding to inputs
outside the bounding box.
Parameters
----------
valid_output : numpy array
The output from the model corresponding to inputs inside the
bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An output array with all the indices corresponding to inputs
outside the bounding box filled in by fill_value
"""
output = self._base_output(input_shape, fill_value)
if not output.shape:
output = np.array(valid_output)
else:
output[valid_index] = valid_output
if np.isscalar(valid_output):
output = output.item(0)
return output
def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
List of filled in output arrays.
"""
outputs = []
for valid_output in valid_outputs:
outputs.append(
self._modify_output(valid_output, valid_index, input_shape, fill_value)
)
return outputs
def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
        outside the bounding_box, adjusting any single-output model so that
        its output becomes a list containing that output.
Parameters
----------
valid_outputs : list
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : array_like
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
if self._model.n_outputs == 1:
valid_outputs = [valid_outputs]
return self._prepare_outputs(
valid_outputs, valid_index, input_shape, fill_value
)
@staticmethod
def _get_valid_outputs_unit(valid_outputs, with_units: bool):
"""
Get the unit for outputs if one is required.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
with_units : bool
whether or not a unit is required
"""
if with_units:
return getattr(valid_outputs, "unit", None)
def _evaluate_model(
self,
evaluate: Callable,
valid_inputs,
valid_index,
input_shape,
fill_value,
with_units: bool,
):
"""
Evaluate the model using the given evaluate routine.
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
valid_inputs : list of numpy arrays
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : numpy array
array of all indices inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_outputs = evaluate(valid_inputs)
valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units)
return (
self.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value),
valid_outputs_unit,
)
def _evaluate(
self, evaluate: Callable, inputs, input_shape, fill_value, with_units: bool
):
"""Evaluate model with steps: prepare_inputs -> evaluate -> prepare_outputs.
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
valid_inputs : list of numpy arrays
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : numpy array
array of all indices inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs)
if all_out:
return self._all_out_output(input_shape, fill_value)
else:
return self._evaluate_model(
evaluate, valid_inputs, valid_index, input_shape, fill_value, with_units
)
@staticmethod
def _set_outputs_unit(outputs, valid_outputs_unit):
"""
        Set the units on the outputs (the last step of the pipeline:
        prepare_inputs -> evaluate -> prepare_outputs -> set output units).
Parameters
----------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
Returns
-------
List containing filled in output values and units
"""
if valid_outputs_unit is not None:
return Quantity(outputs, valid_outputs_unit, copy=False, subok=True)
return outputs
def evaluate(self, evaluate: Callable, inputs, fill_value):
"""
Perform full model evaluation steps:
prepare_inputs -> evaluate -> prepare_outputs -> set output units.
Parameters
----------
evaluate : callable
callable which takes in the valid inputs to evaluate model
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
input_shape = self._model.input_shape(inputs)
# NOTE: CompoundModel does not currently support units during
# evaluation for bounding_box so this feature is turned off
# for CompoundModel(s).
outputs, valid_outputs_unit = self._evaluate(
evaluate, inputs, input_shape, fill_value, self._model.bbox_with_units
)
return tuple(self._set_outputs_unit(outputs, valid_outputs_unit))
class ModelBoundingBox(_BoundingDomain):
"""
A model's bounding box.
Parameters
----------
intervals : dict
A dictionary containing all the intervals for each model input
keys -> input index
values -> interval for that index
model : `~astropy.modeling.Model`
The Model this bounding_box is for.
ignored : list
A list containing all the inputs (index) which will not be
checked for whether or not their elements are in/out of an interval.
order : optional, str
The ordering that is assumed for the tuple representation of this
bounding_box. Options: 'C': C/Python order, e.g. z, y, x.
(default), 'F': Fortran/mathematical notation order, e.g. x, y, z.
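    Examples
    --------
    A minimal sketch; bounding boxes are normally constructed through
    ``validate`` rather than by calling the initializer directly
    (``Gaussian2D`` is used purely for illustration)::
        from astropy.modeling.models import Gaussian2D
        bbox = ModelBoundingBox.validate(Gaussian2D(), ((-1, 1), (-2, 2)))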
"""
def __init__(
self,
intervals: dict[int, _Interval],
model,
ignored: list[int] = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._intervals = {}
if intervals != () and intervals != {}:
self._validate(intervals, order=order)
def copy(self, ignored=None):
intervals = {
index: interval.copy() for index, interval in self._intervals.items()
}
if ignored is None:
ignored = self._ignored.copy()
return ModelBoundingBox(
intervals, self._model, ignored=ignored, order=self._order
)
@property
def intervals(self) -> dict[int, _Interval]:
"""Return bounding_box labeled using input positions."""
return self._intervals
@property
def named_intervals(self) -> dict[str, _Interval]:
"""Return bounding_box labeled using input names."""
return {self._get_name(index): bbox for index, bbox in self._intervals.items()}
def __repr__(self):
parts = ["ModelBoundingBox(", " intervals={"]
for name, interval in self.named_intervals.items():
parts.append(f" {name}: {interval}")
parts.append(" }")
if len(self._ignored) > 0:
parts.append(f" ignored={self.ignored_inputs}")
parts.append(
f" model={self._model.__class__.__name__}(inputs={self._model.inputs})"
)
parts.append(f" order='{self._order}'")
parts.append(")")
return "\n".join(parts)
def __len__(self):
return len(self._intervals)
def __contains__(self, key):
try:
            index = self._get_index(key)
            return index in self._intervals or index in self._ignored
except (IndexError, ValueError):
return False
def has_interval(self, key):
return self._get_index(key) in self._intervals
def __getitem__(self, key):
"""Get bounding_box entries by either input name or input index."""
index = self._get_index(key)
if index in self._ignored:
return _ignored_interval
else:
return self._intervals[self._get_index(key)]
def bounding_box(self, order: str = None):
"""
        Return the old tuple-of-tuples representation of the bounding_box.
        order='C' corresponds to the old bounding_box ordering;
        order='F' corresponds to the gwcs bounding_box ordering.
"""
if len(self._intervals) == 1:
return tuple(list(self._intervals.values())[0])
else:
order = self._get_order(order)
inputs = self._model.inputs
if order == "C":
inputs = inputs[::-1]
bbox = tuple(tuple(self[input_name]) for input_name in inputs)
if len(bbox) == 1:
bbox = bbox[0]
return bbox
def __eq__(self, value):
"""Note equality can be either with old representation or new one."""
if isinstance(value, tuple):
return self.bounding_box() == value
elif isinstance(value, ModelBoundingBox):
return (self.intervals == value.intervals) and (
self.ignored == value.ignored
)
else:
return False
def __setitem__(self, key, value):
"""Validate and store interval under key (input index or input name)."""
index = self._get_index(key)
if index in self._ignored:
self._ignored.remove(index)
self._intervals[index] = _Interval.validate(value)
def __delitem__(self, key):
"""Delete stored interval."""
index = self._get_index(key)
if index in self._ignored:
raise RuntimeError(f"Cannot delete ignored input: {key}!")
del self._intervals[index]
self._ignored.append(index)
def _validate_dict(self, bounding_box: dict):
"""Validate passing dictionary of intervals and setting them."""
for key, value in bounding_box.items():
self[key] = value
@property
def _available_input_index(self):
model_input_index = [self._get_index(_input) for _input in self._model.inputs]
return [_input for _input in model_input_index if _input not in self._ignored]
def _validate_sequence(self, bounding_box, order: str = None):
"""
Validate passing tuple of tuples representation (or related) and setting them.
"""
order = self._get_order(order)
if order == "C":
# If bounding_box is C/python ordered, it needs to be reversed
# to be in Fortran/mathematical/input order.
bounding_box = bounding_box[::-1]
for index, value in enumerate(bounding_box):
self[self._available_input_index[index]] = value
@property
def _n_inputs(self) -> int:
n_inputs = self._model.n_inputs - len(self._ignored)
if n_inputs > 0:
return n_inputs
else:
return 0
def _validate_iterable(self, bounding_box, order: str = None):
"""Validate and set any iterable representation."""
if len(bounding_box) != self._n_inputs:
raise ValueError(
f"Found {len(bounding_box)} intervals, "
f"but must have exactly {self._n_inputs}."
)
if isinstance(bounding_box, dict):
self._validate_dict(bounding_box)
else:
self._validate_sequence(bounding_box, order)
def _validate(self, bounding_box, order: str = None):
"""Validate and set any representation."""
if self._n_inputs == 1 and not isinstance(bounding_box, dict):
self[self._available_input_index[0]] = bounding_box
else:
self._validate_iterable(bounding_box, order)
@classmethod
def validate(
cls,
model,
bounding_box,
ignored: list = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwargs,
):
"""
Construct a valid bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict, tuple
A possible representation of the bounding box
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, ModelBoundingBox):
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.named_intervals
new = cls({}, model, ignored=ignored, order=order)
new._validate(bounding_box)
return new
def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
        _keep_ignored : bool
Keep the ignored inputs of the bounding box (internal argument only)
"""
new = self.copy()
for _input in fixed_inputs.keys():
del new[_input]
if _keep_ignored:
ignored = new.ignored
else:
ignored = None
return ModelBoundingBox.validate(
model, new.named_intervals, ignored=ignored, order=new._order
)
@property
def dimension(self):
return len(self)
def domain(self, resolution, order: str = None):
inputs = self._model.inputs
order = self._get_order(order)
if order == "C":
inputs = inputs[::-1]
return [self[input_name].domain(resolution) for input_name in inputs]
def _outside(self, input_shape, inputs):
"""
Get all the input positions which are outside the bounding_box,
so that the corresponding outputs can be filled with the fill
value (default NaN).
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
outside_index : bool-numpy array
True -> position outside bounding_box
False -> position inside bounding_box
all_out : bool
if all of the inputs are outside the bounding_box
"""
all_out = False
outside_index = np.zeros(input_shape, dtype=bool)
for index, _input in enumerate(inputs):
_input = np.asanyarray(_input)
outside = np.broadcast_to(self[index].outside(_input), input_shape)
outside_index[outside] = True
if outside_index.all():
all_out = True
break
return outside_index, all_out
def _valid_index(self, input_shape, inputs):
"""
Get the indices of all the inputs inside the bounding_box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_index : numpy array
array of all indices inside the bounding box
all_out : bool
if all of the inputs are outside the bounding_box
"""
outside_index, all_out = self._outside(input_shape, inputs)
valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero()
if len(valid_index[0]) == 0:
all_out = True
return valid_index, all_out
def prepare_inputs(self, input_shape, inputs) -> tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
valid_index, all_out = self._valid_index(input_shape, inputs)
valid_inputs = []
if not all_out:
for _input in inputs:
if input_shape:
valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[
valid_index
]
if np.isscalar(_input):
valid_input = valid_input.item(0)
valid_inputs.append(valid_input)
else:
valid_inputs.append(_input)
return tuple(valid_inputs), valid_index, all_out
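# Illustrative sketch (not part of the module): ``prepare_inputs`` is normally
# exercised indirectly, by attaching a bounding box to a model and evaluating
# with ``with_bounding_box=True``. The model and values below are assumptions
# chosen for demonstration only.
#
#     from astropy.modeling.models import Gaussian1D
#     g = Gaussian1D(amplitude=1, mean=0, stddev=1)
#     g.bounding_box = (-2, 2)
#     g([-5, 0, 5], with_bounding_box=True)   # points outside -> fill value (NaN)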
_BaseSelectorArgument = namedtuple("_BaseSelectorArgument", "index ignore")
class _SelectorArgument(_BaseSelectorArgument):
"""
Contains a single CompoundBoundingBox slicing input.
Parameters
----------
index : int
The index of the input in the input list
ignore : bool
Whether or not this input will be ignored by the bounding box.
Methods
-------
validate :
Returns a valid SelectorArgument for a given model.
get_selector :
Returns the value of the input for use in finding the correct
bounding_box.
get_fixed_value :
Gets the slicing value from a fix_inputs set of values.
"""
def __new__(cls, index, ignore):
self = super().__new__(cls, index, ignore)
return self
@classmethod
def validate(cls, model, argument, ignored: bool = True):
"""
Construct a valid selector argument for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be an argument.
argument : int or str
A representation of which evaluation input to use
ignored : optional, bool
Whether or not to ignore this argument in the ModelBoundingBox.
Returns
-------
Validated selector_argument
"""
return cls(get_index(model, argument), ignored)
def get_selector(self, *inputs):
"""
Get the selector value corresponding to this argument.
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
_selector = inputs[self.index]
if isiterable(_selector):
if len(_selector) == 1:
return _selector[0]
else:
return tuple(_selector)
return _selector
def name(self, model) -> str:
"""
Get the name of the input described by this selector argument.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return get_name(model, self.index)
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return f"Argument(name='{self.name(model)}', ignore={self.ignore})"
def get_fixed_value(self, model, values: dict):
"""
Gets the fixed input value corresponding to this argument.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
values : dict
Dictionary of fixed inputs.
"""
if self.index in values:
return values[self.index]
else:
if self.name(model) in values:
return values[self.name(model)]
else:
raise RuntimeError(
f"{self.pretty_repr(model)} was not found in {values}"
)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is described by this selector argument.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
argument : int or str
A representation of which evaluation input is being used
"""
return self.index == get_index(model, argument)
def named_tuple(self, model):
"""
Get a tuple representation of this argument using the input
name from the model.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return (self.name(model), self.ignore)
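# Illustrative sketch (not part of the module): validating a selector argument
# against a model and reading the selector value from a set of inputs. The
# two-input model and values below are assumptions for demonstration only.
#
#     from astropy.modeling.models import Gaussian2D
#     arg = _SelectorArgument.validate(Gaussian2D(), "x", ignored=True)
#     arg.index, arg.ignore        # -> (0, True)
#     arg.get_selector(0.5, 3.7)   # -> 0.5 (the value of input "x")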
class _SelectorArguments(tuple):
"""
Contains the CompoundBoundingBox slicing description.
Parameters
----------
input_ :
The SelectorArgument values
Methods
-------
validate :
Returns a valid SelectorArguments for its model.
get_selector :
Returns the selector a set of inputs corresponds to.
is_selector :
Determines if a selector is correctly formatted for this CompoundBoundingBox.
get_fixed_value :
Gets the selector from a fix_inputs set of values.
"""
_kept_ignore = None
def __new__(cls, input_: tuple[_SelectorArgument], kept_ignore: list = None):
self = super().__new__(cls, input_)
if kept_ignore is None:
self._kept_ignore = []
else:
self._kept_ignore = kept_ignore
return self
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
parts = ["SelectorArguments("]
for argument in self:
parts.append(f" {argument.pretty_repr(model)}")
parts.append(")")
return "\n".join(parts)
@property
def ignore(self):
"""Get the list of ignored inputs."""
ignore = [argument.index for argument in self if argument.ignore]
ignore.extend(self._kept_ignore)
return ignore
@property
def kept_ignore(self):
"""The arguments to persist in ignoring."""
return self._kept_ignore
@classmethod
def validate(cls, model, arguments, kept_ignore: list = None):
"""
Construct a valid Selector description for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
arguments :
The individual argument information
kept_ignore :
Arguments to persist as ignored
"""
inputs = []
for argument in arguments:
_input = _SelectorArgument.validate(model, *argument)
if _input.index in [this.index for this in inputs]:
raise ValueError(
f"Input: '{get_name(model, _input.index)}' has been repeated."
)
inputs.append(_input)
if len(inputs) == 0:
raise ValueError("There must be at least one selector argument.")
if isinstance(arguments, _SelectorArguments):
if kept_ignore is None:
kept_ignore = []
kept_ignore.extend(arguments.kept_ignore)
return cls(tuple(inputs), kept_ignore)
def get_selector(self, *inputs):
"""
Get the selector corresponding to these inputs.
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
return tuple(argument.get_selector(*inputs) for argument in self)
def is_selector(self, _selector):
"""
Determine if this is a reasonable selector.
Parameters
----------
_selector : tuple
The selector to check
"""
return isinstance(_selector, tuple) and len(_selector) == len(self)
def get_fixed_values(self, model, values: dict):
"""
Gets the fixed input values corresponding to these arguments.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
values : dict
Dictionary of fixed inputs.
"""
return tuple(argument.get_fixed_value(model, values) for argument in self)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is one of the selector arguments.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which evaluation input is being used
"""
return any(selector_arg.is_argument(model, argument) for selector_arg in self)
def selector_index(self, model, argument):
"""
Get the index of the argument passed in the selector tuples.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
for index, selector_arg in enumerate(self):
if selector_arg.is_argument(model, argument):
return index
else:
raise ValueError(
f"{argument} does not correspond to any selector argument."
)
def reduce(self, model, argument):
"""
Reduce the selector arguments by the argument given.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
arguments = list(self)
kept_ignore = [arguments.pop(self.selector_index(model, argument)).index]
kept_ignore.extend(self._kept_ignore)
return _SelectorArguments.validate(model, tuple(arguments), kept_ignore)
def add_ignore(self, model, argument):
"""
Add argument to the kept_ignore list.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
if self.is_argument(model, argument):
raise ValueError(
f"{argument}: is a selector argument and cannot be ignored."
)
kept_ignore = [get_index(model, argument)]
return _SelectorArguments.validate(model, self, kept_ignore)
def named_tuple(self, model):
"""
Get a tuple of selector argument tuples using input names.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
return tuple(selector_arg.named_tuple(model) for selector_arg in self)
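# Illustrative sketch (not part of the module): building a selector-argument
# description for a two-input model and extracting a selector tuple. The input
# names and values below are assumptions for demonstration only.
#
#     from astropy.modeling.models import Gaussian2D
#     args = _SelectorArguments.validate(Gaussian2D(), ((0, True),))
#     args.get_selector(1.5, 2.5)          # -> (1.5,)
#     args.is_argument(Gaussian2D(), "x")  # -> True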
class CompoundBoundingBox(_BoundingDomain):
"""
A model's compound bounding box.
Parameters
----------
bounding_boxes : dict
A dictionary containing all the ModelBoundingBoxes that are possible
keys -> _selector (extracted from model inputs)
values -> ModelBoundingBox
model : `~astropy.modeling.Model`
The Model this compound bounding_box is for.
selector_args : _SelectorArguments
A description of how to extract the selectors from model inputs.
create_selector : optional
A method which takes in the selector and the model to return a
valid bounding_box corresponding to that selector. This can be used
to construct new bounding_boxes for previously undefined selectors.
These new boxes are then stored for future lookups.
order : optional, str
The ordering that is assumed for the tuple representation of the
bounding_boxes.
"""
def __init__(
self,
bounding_boxes: dict[Any, ModelBoundingBox],
model,
selector_args: _SelectorArguments,
create_selector: Callable = None,
ignored: list[int] = None,
order: str = "C",
):
super().__init__(model, ignored, order)
self._create_selector = create_selector
self._selector_args = _SelectorArguments.validate(model, selector_args)
self._bounding_boxes = {}
self._validate(bounding_boxes)
def copy(self):
bounding_boxes = {
selector: bbox.copy(self.selector_args.ignore)
for selector, bbox in self._bounding_boxes.items()
}
return CompoundBoundingBox(
bounding_boxes,
self._model,
selector_args=self._selector_args,
create_selector=copy.deepcopy(self._create_selector),
order=self._order,
)
def __repr__(self):
parts = ["CompoundBoundingBox(", " bounding_boxes={"]
# bounding_boxes
for _selector, bbox in self._bounding_boxes.items():
bbox_repr = bbox.__repr__().split("\n")
parts.append(f" {_selector} = {bbox_repr.pop(0)}")
for part in bbox_repr:
parts.append(f" {part}")
parts.append(" }")
# selector_args
selector_args_repr = self.selector_args.pretty_repr(self._model).split("\n")
parts.append(f" selector_args = {selector_args_repr.pop(0)}")
for part in selector_args_repr:
parts.append(f" {part}")
parts.append(")")
return "\n".join(parts)
@property
def bounding_boxes(self) -> dict[Any, ModelBoundingBox]:
return self._bounding_boxes
@property
def selector_args(self) -> _SelectorArguments:
return self._selector_args
@selector_args.setter
def selector_args(self, value):
self._selector_args = _SelectorArguments.validate(self._model, value)
warnings.warn(
"Overriding selector_args may cause problems you should re-validate "
"the compound bounding box before use!",
RuntimeWarning,
)
@property
def named_selector_tuple(self) -> tuple:
return self._selector_args.named_tuple(self._model)
@property
def create_selector(self):
return self._create_selector
@staticmethod
def _get_selector_key(key):
if isiterable(key):
return tuple(key)
else:
return (key,)
def __setitem__(self, key, value):
_selector = self._get_selector_key(key)
if not self.selector_args.is_selector(_selector):
raise ValueError(f"{_selector} is not a selector!")
ignored = self.selector_args.ignore + self.ignored
self._bounding_boxes[_selector] = ModelBoundingBox.validate(
self._model, value, ignored, order=self._order
)
def _validate(self, bounding_boxes: dict):
for _selector, bounding_box in bounding_boxes.items():
self[_selector] = bounding_box
def __eq__(self, value):
if isinstance(value, CompoundBoundingBox):
return (
self.bounding_boxes == value.bounding_boxes
and self.selector_args == value.selector_args
and self.create_selector == value.create_selector
)
else:
return False
@classmethod
def validate(
cls,
model,
bounding_box: dict,
selector_args=None,
create_selector=None,
ignored: list = None,
order: str = "C",
_preserve_ignore: bool = False,
**kwarg,
):
"""
Construct a valid compound bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict
Dictionary of possible bounding_box representations
selector_args : optional
Description of the selector arguments
create_selector : optional, callable
Method for generating new selectors
order : optional, str
The ordering assumed for a tuple representation of the bounding_box.
Default: 'C'
"""
if isinstance(bounding_box, CompoundBoundingBox):
if selector_args is None:
selector_args = bounding_box.selector_args
if create_selector is None:
create_selector = bounding_box.create_selector
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.bounding_boxes
if selector_args is None:
raise ValueError(
"Selector arguments must be provided "
"(can be passed as part of bounding_box argument)"
)
return cls(
bounding_box,
model,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
def __contains__(self, key):
return key in self._bounding_boxes
def _create_bounding_box(self, _selector):
self[_selector] = self._create_selector(_selector, model=self._model)
return self[_selector]
def __getitem__(self, key):
_selector = self._get_selector_key(key)
if _selector in self:
return self._bounding_boxes[_selector]
elif self._create_selector is not None:
return self._create_bounding_box(_selector)
else:
raise RuntimeError(f"No bounding box is defined for selector: {_selector}.")
def _select_bounding_box(self, inputs) -> ModelBoundingBox:
_selector = self.selector_args.get_selector(*inputs)
return self[_selector]
def prepare_inputs(self, input_shape, inputs) -> tuple[Any, Any, Any]:
"""
Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
bounding_box = self._select_bounding_box(inputs)
return bounding_box.prepare_inputs(input_shape, inputs)
def _matching_bounding_boxes(self, argument, value) -> dict[Any, ModelBoundingBox]:
selector_index = self.selector_args.selector_index(self._model, argument)
matching = {}
for selector_key, bbox in self._bounding_boxes.items():
if selector_key[selector_index] == value:
new_selector_key = list(selector_key)
new_selector_key.pop(selector_index)
if bbox.has_interval(argument):
new_bbox = bbox.fix_inputs(
self._model, {argument: value}, _keep_ignored=True
)
else:
new_bbox = bbox.copy()
matching[tuple(new_selector_key)] = new_bbox
if len(matching) == 0:
raise ValueError(
f"Attempting to fix input {argument}, but there are no "
f"bounding boxes for argument value {value}."
)
return matching
def _fix_input_selector_arg(self, argument, value):
matching_bounding_boxes = self._matching_bounding_boxes(argument, value)
if len(self.selector_args) == 1:
return matching_bounding_boxes[()]
else:
return CompoundBoundingBox(
matching_bounding_boxes,
self._model,
self.selector_args.reduce(self._model, argument),
)
def _fix_input_bbox_arg(self, argument, value):
bounding_boxes = {}
for selector_key, bbox in self._bounding_boxes.items():
bounding_boxes[selector_key] = bbox.fix_inputs(
self._model, {argument: value}, _keep_ignored=True
)
return CompoundBoundingBox(
bounding_boxes,
self._model,
self.selector_args.add_ignore(self._model, argument),
)
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
fixed_input_keys = list(fixed_inputs.keys())
argument = fixed_input_keys.pop()
value = fixed_inputs[argument]
if self.selector_args.is_argument(self._model, argument):
bbox = self._fix_input_selector_arg(argument, value)
else:
bbox = self._fix_input_bbox_arg(argument, value)
if len(fixed_input_keys) > 0:
new_fixed_inputs = fixed_inputs.copy()
del new_fixed_inputs[argument]
bbox = bbox.fix_inputs(model, new_fixed_inputs)
if isinstance(bbox, CompoundBoundingBox):
selector_args = bbox.named_selector_tuple
bbox_dict = bbox
elif isinstance(bbox, ModelBoundingBox):
selector_args = None
bbox_dict = bbox.named_intervals
return bbox.__class__.validate(
model, bbox_dict, order=bbox.order, selector_args=selector_args
)
|
c974544c5f5736ef5a533ba42f3809e21197a69c0a829fbb027c6787b1bb3bdf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import warnings
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
from .core import Fittable1DModel, Fittable2DModel
from .parameters import InputParameterError, Parameter
from .utils import ellipse_extent
__all__ = [
"AiryDisk2D",
"Moffat1D",
"Moffat2D",
"Box1D",
"Box2D",
"Const1D",
"Const2D",
"Ellipse2D",
"Disk2D",
"Gaussian1D",
"Gaussian2D",
"Linear1D",
"Lorentz1D",
"RickerWavelet1D",
"RickerWavelet2D",
"RedshiftScaleFactor",
"Multiply",
"Planar2D",
"Scale",
"Sersic1D",
"Sersic2D",
"Shift",
"Sine1D",
"Cosine1D",
"Tangent1D",
"ArcSine1D",
"ArcCosine1D",
"ArcTangent1D",
"Trapezoid1D",
"TrapezoidDisk2D",
"Ring2D",
"Voigt1D",
"KingProjectedAnalytic1D",
"Exponential1D",
"Logarithmic1D",
]
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Gaussian"
)
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Standard deviation of the Gaussian",
)
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev**2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.input_unit is None:
return None
return {self.inputs[0]: self.mean.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mean": inputs_unit[self.inputs[0]],
"stddev": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
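# Illustrative sketch (not part of the module): the peak value of Gaussian1D is
# the ``amplitude`` parameter and the FWHM follows from ``stddev``. The numbers
# below are assumptions for demonstration only.
#
#     g = Gaussian1D(amplitude=2.0, mean=0.0, stddev=1.0)
#     g(0.0)    # -> 2.0 (peak value at the mean)
#     g.fwhm    # -> ~2.3548 (stddev * 2 * sqrt(2 * ln 2))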
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(
default=0, description="Peak position (along x axis) of Gaussian"
)
y_mean = Parameter(
default=0, description="Peak position (along y axis) of Gaussian"
)
x_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along x axis)"
)
y_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along y axis)"
)
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"
),
)
def __init__(
self,
amplitude=amplitude.default,
x_mean=x_mean.default,
y_mean=y_mean.default,
x_stddev=None,
y_stddev=None,
theta=None,
cov_matrix=None,
**kwargs,
):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError(
"Cannot specify both cov_matrix and x/y_stddev/theta"
)
# Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault("bounds", {})
kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None))
kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude,
x_mean=x_mean,
y_mean=y_mean,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
**kwargs,
)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``.
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-5.5, upper=5.5)
y: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-2.0, upper=2.0)
y: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
dx, dy = ellipse_extent(a, b, self.theta)
return (
(self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx),
)
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function."""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(
-((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2))
)
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters."""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2.0 * theta)
sin2t = np.sin(2.0 * theta)
xstd2 = x_stddev**2
ystd2 = y_stddev**2
xstd3 = x_stddev**3
ystd3 = y_stddev**3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff**2
ydiff2 = ydiff**2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2)))
da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff))
dg_dx_stddev = g * (
-(
da_dx_stddev * xdiff2
+ db_dx_stddev * xdiff * ydiff
+ dc_dx_stddev * ydiff2
)
)
dg_dy_stddev = g * (
-(
da_dy_stddev * xdiff2
+ db_dy_stddev * xdiff * ydiff
+ dc_dy_stddev * ydiff2
)
)
dg_dtheta = g * (
-(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)
)
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta]
@property
def input_units(self):
x_unit = self.x_mean.input_unit
y_unit = self.y_mean.input_unit
if x_unit is None and y_unit is None:
return None
return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_mean": inputs_unit[self.inputs[0]],
"y_mean": inputs_unit[self.inputs[0]],
"x_stddev": inputs_unit[self.inputs[0]],
"y_stddev": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
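# Illustrative sketch (not part of the module): a covariance matrix may be
# passed instead of x_stddev/y_stddev/theta; the stddevs and rotation are then
# recovered from its eigen-decomposition. The diagonal matrix below is an
# assumption for demonstration only.
#
#     g = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=[[4, 0], [0, 1]])
#     g.x_stddev.value, g.y_stddev.value   # -> (2.0, 1.0) for this diagonal case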
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.input_unit is None:
return None
return {self.inputs[0]: self.offset.input_unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function."""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.offset) for x in self.bounding_box
)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function."""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model."""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter."""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"offset": outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.input_unit is None:
return None
return {self.inputs[0]: self.factor.input_unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function."""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function."""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description="Redshift", default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function."""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative."""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model."""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.z) for x in self.bounding_box.bounding_box()
)
return inv
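# Illustrative sketch (not part of the module): a redshift z stretches the
# coordinate by (1 + z); the inverse uses z' = 1/(1 + z) - 1 so that the
# round trip returns the original value. The numbers below are assumptions
# for demonstration only.
#
#     r = RedshiftScaleFactor(z=1.0)
#     r(10.0)               # -> 20.0
#     r.inverse.z.value     # -> -0.5
#     r.inverse(r(10.0))    # -> 10.0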
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)
)
@property
def input_units(self):
if self.r_eff.input_unit is None:
return None
return {self.inputs[0]: self.r_eff.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_eff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
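# Illustrative sketch (not part of the module): by construction the Sersic
# profile equals ``amplitude`` at r = r_eff, since the exponent vanishes there.
# Evaluation requires scipy (for gammaincinv); the numbers below are
# assumptions for demonstration only.
#
#     s = Sersic1D(amplitude=2.0, r_eff=5.0, n=4)
#     s(5.0)    # -> 2.0 (surface brightness at the effective radius)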
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.input_unit is None:
return None
return {self.inputs[0]: 1.0 / self.frequency.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": inputs_unit[self.inputs[0]] ** -1,
"amplitude": outputs_unit[self.outputs[0]],
}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative."""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (
TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine."""
return ArcSine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
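# Illustrative sketch (not part of the module): with frequency 0.25 the sine
# argument at x = 1 is 2*pi*0.25 = pi/2, so the model returns the amplitude.
# The numbers below are assumptions for demonstration only.
#
#     s = Sine1D(amplitude=3.0, frequency=0.25, phase=0.0)
#     s(1.0)    # -> 3.0 (= 3 * sin(pi/2))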
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative."""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = -(
TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)
)
d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine."""
return ArcCosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
Note that the tangent function is undefined for inputs of the form
pi/2 + n*pi for all integers n. Thus the default bounding box has been
restricted to:
.. math:: [(-1/4 - p)/f, (1/4 - p)/f]
which corresponds to a single continuous branch of the tangent
function.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative."""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent."""
return ArcTangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
bbox = [
(-1 / 4 - self.phase) / self.frequency,
(1 / 4 - self.phase) / self.frequency,
]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models.
"""
@property
def input_units(self):
if self.amplitude.input_unit is None:
return None
return {self.inputs[0]: self.amplitude.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"frequency": outputs_unit[self.outputs[0]] ** -1,
"amplitude": inputs_unit[self.inputs[0]],
}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f
The arcsin function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function."""
# Note: If x and amplitude are quantities, they should normally have
# matching units, so that argument ends up being dimensionless. However,
# np.arcsin of a dimensionless quantity would carry angular units rather
# than being a plain number, so we remove the quantity-ness from
# argument in this case.
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine."""
return Sine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
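# Illustrative sketch (not part of the module): ArcSine1D inverts Sine1D on the
# principal branch, so a round trip recovers the input for small x. The numbers
# below are assumptions for demonstration only.
#
#     s = Sine1D(amplitude=1.0, frequency=0.25, phase=0.0)
#     s.inverse(s(0.1))    # -> ~0.1 (within the principal branch)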
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
.. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f
The arccos function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function."""
# Note: If x and amplitude are quantities, they should normally have
# matching units, so that argument ends up being dimensionless. However,
# np.arccos of a dimensionless quantity would carry angular units rather
# than being a plain number, so we remove the quantity-ness from
# argument in this case.
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative."""
d_amplitude = x / (
TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcCosine."""
return Cosine1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class ArcTangent1D(_InverseTrigonometric1D):
"""
One dimensional ArcTangent model returning values between -pi/2 and
pi/2 only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Tangent
frequency : float
Oscillation frequency for corresponding Tangent
phase : float
Oscillation phase for corresponding Tangent
See Also
--------
Tangent1D, ArcSine1D, ArcCosine1D
Notes
-----
Model formula:
.. math:: f(x) = ((arctan(x / A) / 2pi) - p) / f
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcTangent1D
plt.figure()
s1 = ArcTangent1D(amplitude=1, frequency=.25)
r=np.arange(-10, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-10, 10, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model function."""
# Note: If x and amplitude are quantities, they should normally have
# matching units, so that argument ends up being dimensionless. However,
# np.arctan of a dimensionless quantity would carry angular units rather
# than being a plain number, so we remove the quantity-ness from
# argument in this case.
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_tan = np.arctan(argument) / TWOPI
return (arc_tan - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model derivative."""
d_amplitude = -x / (
TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2)
)
d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2
d_phase = -1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of ArcTangent."""
return Tangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function."""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters."""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope**-1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.input_unit is None and self.slope.input_unit is None:
return None
return {self.inputs[0]: self.intercept.input_unit / self.slope.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit[self.outputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
}
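# Illustrative sketch (not part of the module): the analytic inverse of
# y = a*x + b has slope 1/a and intercept -b/a. The numbers below are
# assumptions for demonstration only.
#
#     line = Linear1D(slope=2.0, intercept=1.0)
#     line(3.0)                # -> 7.0
#     line.inverse(7.0)        # -> 3.0
#     line.inverse.slope.value, line.inverse.intercept.value   # -> (0.5, -0.5)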
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function."""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters."""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit["z"],
"slope_x": outputs_unit["z"] / inputs_unit["x"],
"slope_y": outputs_unit["z"] / inputs_unit["y"],
}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function."""
return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters."""
d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2)
d_x_0 = (
amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2)
)
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
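# Illustrative sketch (not part of the module): the Lorentzian drops to half of
# its peak value at x_0 +/- fwhm/2, which is what defines the fwhm parameter.
# The numbers below are assumptions for demonstration only.
#
#     lor = Lorentz1D(amplitude=4.0, x_0=0.0, fwhm=2.0)
#     lor(0.0)    # -> 4.0 (peak)
#     lor(1.0)    # -> 2.0 (half maximum at x_0 + fwhm/2)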
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
method : str, optional
Algorithm for computing the complex error function; one of
'Humlicek2' (fast and generally accurate to a relative error better
than ``3.e-5``) or 'Scipy' (alias 'wofz'; requires ``scipy``, almost as
fast and the reference in accuracy). The default is 'wofz' when
``scipy`` is installed, otherwise 'Humlicek2'.
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided
consistently with compatible units or as unitless numbers.
Voigt function is calculated as real part of the complex error function computed from either
Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or
`~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0, description="Position of the peak")
amplitude_L = Parameter(default=1, description="The Lorentzian amplitude")
fwhm_L = Parameter(
default=2 / np.pi, description="The Lorentzian full width at half maximum"
)
fwhm_G = Parameter(
default=np.log(2), description="The Gaussian full width at half maximum"
)
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(
self,
x_0=x_0.default,
amplitude_L=amplitude_L.default,
fwhm_L=fwhm_L.default,
fwhm_G=fwhm_G.default,
method=None,
**kwargs,
):
if str(method).lower() == "humlicek2" and HAS_SCIPY:
warnings.warn(
f"{method} has been deprecated since Astropy 5.3 and will be removed in a future version.\n"
"It is recommended to always use the `~scipy.special.wofz` implementation "
"when `scipy` is installed.",
AstropyDeprecationWarning,
)
if method is None:
if HAS_SCIPY:
method = "wofz"
else:
method = "humlicek2"
if str(method).lower() in ("wofz", "scipy"):
from scipy.special import wofz
self._faddeeva = wofz
elif str(method).lower() == "humlicek2":
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(
f"Not a valid method for Voigt1D Faddeeva function: {method}."
)
self.method = self._faddeeva.__name__
super().__init__(
x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs
)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`.
"""
if z.shape == self._last_z.shape and np.allclose(
z, self._last_z, rtol=1.0e-14, atol=1.0e-15
):
return self._last_w
self._last_z = (
z.to_value(u.dimensionless_unscaled) if isinstance(z, u.Quantity) else z
)
self._last_w = self._faddeeva(self._last_z)
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G):
"""
Derivative of the one dimensional Voigt function with respect to parameters.
"""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [
-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G,
]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm_L": inputs_unit[self.inputs[0]],
"fwhm_G": inputs_unit[self.inputs[0]],
"amplitude_L": outputs_unit[self.outputs[0]],
}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z = x + iy) combining Humlicek's rational approximations.
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
# fmt: off
AA = np.array(
[
+46236.3358828121, -147726.58393079657j,
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j,
]
) # 1j/sqrt(pi) to the 12. digit
bb = np.array(
[
+7918.06640624997,
-126689.0625,
+295607.8125,
-236486.25,
+84459.375,
-15015.0,
+1365.0,
-60.0,
+1.0,
]
)
# fmt: on
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
# fmt: off
# Recursive algorithms for the polynomials in Z with coefficients AA, bb
# numer = 0.0
# for A in AA[::-1]:
# numer = numer * Z + A
# Explicitly unrolled above loop for speed
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
# denom = 0.0
# for b in bb[::-1]:
# denom = denom * ZZ + b
# Explicitly unrolled above loop for speed
denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ +
bb[2])*ZZ + bb[1])*ZZ + bb[0]
# fmt: on
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters."""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
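    Examples
    --------
    A minimal usage sketch (the amplitude value is illustrative)::
        import numpy as np
        from astropy.modeling.models import Const2D
        m = Const2D(amplitude=3.0)
        y, x = np.mgrid[0:3, 0:3]
        img = m(x, y)   # constant image equal to ``amplitude`` everywhere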
"""
amplitude = Parameter(
default=1, description="Value of the constant function", mag=True
)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function."""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False, subok=True)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"amplitude": outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
        e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, angle=theta.degree,
                              edgecolor='red', facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse", mag=True)
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"a": inputs_unit[self.inputs[0]],
"b": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
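    Examples
    --------
    A minimal usage sketch (parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Disk2D
        disk = Disk2D(amplitude=1, x_0=12, y_0=12, R_0=5)
        y, x = np.mgrid[0:25, 0:25]
        img = disk(x, y)   # equals amplitude inside R_0, 0 outside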
"""
amplitude = Parameter(default=1, description="Value of disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0**2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return (
(self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0),
)
@property
def input_units(self):
x_unit = self.x_0.input_unit
y_unit = self.y_0.input_unit
if x_unit is None and y_unit is None:
return None
return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
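    Examples
    --------
    A minimal usage sketch (parameter values are illustrative; ``r_out``
    may be given instead of ``width``)::
        import numpy as np
        from astropy.modeling.models import Ring2D
        ring = Ring2D(amplitude=1, x_0=12, y_0=12, r_in=4, width=2)
        y, x = np.mgrid[0:25, 0:25]
        img = ring(x, y)   # amplitude where r_in <= r <= r_in + width, 0 elsewhere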
"""
amplitude = Parameter(default=1, description="Value of the disk function", mag=True)
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(
self,
amplitude=amplitude.default,
x_0=x_0.default,
y_0=y_0.default,
r_in=None,
width=None,
r_out=None,
**kwargs,
):
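        # Resolve the (r_in, width, r_out) combinations: any two of the three
        # determine the ring, missing values fall back to the parameter
        # defaults, and inconsistent or negative combinations raise
        # InputParameterError below.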
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_in": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A", mag=True)
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function."""
inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
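    Examples
    --------
    A minimal usage sketch (parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Box2D
        box = Box2D(amplitude=1, x_0=10, y_0=10, x_width=6, y_width=4)
        y, x = np.mgrid[0:20, 0:20]
        img = box(x, y)   # amplitude inside the rectangle, 0 outside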
"""
amplitude = Parameter(default=1, description="Amplitude", mag=True)
x_0 = Parameter(
default=0, description="X position of the center of the box function"
)
y_0 = Parameter(
default=0, description="Y position of the center of the box function"
)
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function."""
x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0)
y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[1]],
"x_width": inputs_unit[self.inputs[0]],
"y_width": inputs_unit[self.inputs[1]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function."""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.0
x3 = x_0 + width / 2.0
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
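    Examples
    --------
    A minimal usage sketch (parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import TrapezoidDisk2D
        trap = TrapezoidDisk2D(amplitude=2, x_0=12, y_0=12, R_0=5, slope=0.5)
        y, x = np.mgrid[0:25, 0:25]
        img = trap(x, y)   # flat top of height amplitude inside R_0, linear falloff to zero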
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(
default=1, description="Slope of tails of trapezoid in x direction"
)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function."""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False, subok=True)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
x_unit = self.x_0.input_unit
y_unit = self.y_0.input_unit
if x_unit is None and y_unit is None:
return None
return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit["x"] != inputs_unit["y"]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"R_0": inputs_unit[self.inputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function."""
xx_ww = (x - x_0) ** 2 / (2 * sigma**2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
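    Examples
    --------
    A minimal usage sketch (parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import RickerWavelet2D
        mh = RickerWavelet2D(amplitude=1, x_0=12, y_0=12, sigma=3)
        y, x = np.mgrid[0:25, 0:25]
        img = mh(x, y)   # positive central peak surrounded by a negative annulus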
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function."""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2)
return amplitude * (1 - rr_ww) * np.exp(-rr_ww)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"sigma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
    1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
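    Examples
    --------
    A minimal usage sketch (requires ``scipy``; parameter values are
    illustrative)::
        import numpy as np
        from astropy.modeling.models import AiryDisk2D
        airy = AiryDisk2D(amplitude=1, x_0=12, y_0=12, radius=4)
        y, x = np.mgrid[0:25, 0:25]
        img = airy(x, y)   # bright core with faint diffraction rings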
"""
amplitude = Parameter(
default=1, description="Amplitude (peak value) of the Airy function"
)
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(
default=1,
description="The radius of the Airy disk (radius of first zero crossing)",
)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function."""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"radius": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function."""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters."""
fac = 1 + (x - x_0) ** 2 / gamma**2
d_A = fac ** (-alpha)
d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2)
d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3)
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
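    Examples
    --------
    A minimal usage sketch (parameter values are illustrative)::
        import numpy as np
        from astropy.modeling.models import Moffat2D
        psf = Moffat2D(amplitude=1, x_0=12, y_0=12, gamma=3, alpha=2.5)
        y, x = np.mgrid[0:25, 0:25]
        img = psf(x, y)
        width = psf.fwhm   # full width at half maximum implied by gamma and alpha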
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(
default=0, description="X position of the maximum of the Moffat model"
)
y_0 = Parameter(
default=0, description="Y position of the maximum of the Moffat model"
)
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters."""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg))
d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
else:
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"gamma": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{
-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]
\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a float (in radians) or a |Quantity| angle"
),
)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
bn = cls._gammaincinv(2.0 * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {
self.inputs[0]: self.x_0.input_unit,
self.inputs[1]: self.y_0.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {
"x_0": inputs_unit[self.inputs[0]],
"y_0": inputs_unit[self.inputs[0]],
"r_eff": inputs_unit[self.inputs[0]],
"theta": u.rad,
"amplitude": outputs_unit[self.outputs[0]],
}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
    It usually works for models with a concentration parameter (c = log10(r_t/r_c)) smaller than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}")
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(
default=1,
bounds=(FLOAT_EPSILON, None),
description="Amplitude or scaling factor",
)
r_core = Parameter(
default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius"
)
r_tide = Parameter(
default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius"
)
@property
def concentration(self):
"""Concentration parameter of the king model."""
return np.log10(np.abs(self.r_tide / self.r_core))
@staticmethod
def _core_func(x, r_core, r_tide, power=1):
return (
1.0 / np.sqrt(x**2 + r_core**2) ** power
- 1.0 / np.sqrt(r_tide**2 + r_core**2) ** power
)
@staticmethod
def _filter(x, r_tide, result):
"""Set invalid r values to 0"""
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.0
def evaluate(self, x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core**2 * self._core_func(x, r_core, r_tide) ** 2
self._filter(x, r_tide, result)
return result
def fit_deriv(self, x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core**2 * self._core_func(x, r_core, r_tide) ** 2
self._filter(x, r_tide, d_amplitude)
d_r_core = (
-2.0
* amplitude
* r_core**3
* self._core_func(x, r_core, r_tide, power=3)
* self._core_func(x, r_core, r_tide)
+ 2 * amplitude * r_core * self._core_func(x, r_core, r_tide) ** 2
)
self._filter(x, r_tide, d_r_core)
d_r_tide = (
2 * amplitude * r_core**2 * r_tide * self._core_func(x, r_core, r_tide)
) / (r_core**2 + r_tide**2) ** (3 / 2)
self._filter(x, r_tide, d_r_tide)
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.input_unit is None:
return None
return {self.inputs[0]: self.r_core.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"r_core": inputs_unit[self.inputs[0]],
"r_tide": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
    amplitude : float, optional
        Multiplicative scale factor of the model.
    tau : float, optional
        Scale length; the model evaluates to zero at ``x = tau``. Must be
        nonzero.
See Also
--------
Exponential1D, Gaussian1D
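    Examples
    --------
    A minimal usage sketch (parameter values are illustrative); the model
    evaluates ``amplitude * np.log(x / tau)`` and its inverse is an
    `Exponential1D`::
        import numpy as np
        from astropy.modeling.models import Logarithmic1D
        m = Logarithmic1D(amplitude=2, tau=1)
        x = np.linspace(0.1, 10, 50)
        y = m(x)
        x_back = m.inverse(y)   # recovers x to within floating point error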
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.input_unit is None:
return None
return {self.inputs[0]: self.tau.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
    amplitude : float, optional
        Value of the model at ``x = 0``.
    tau : float, optional
        E-folding scale; must be nonzero. A negative value gives a decaying
        exponential.
See Also
--------
Logarithmic1D, Gaussian1D
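    Examples
    --------
    A minimal usage sketch (parameter values are illustrative); the model
    evaluates ``amplitude * np.exp(x / tau)``, so a decaying exponential uses
    a negative ``tau``::
        import numpy as np
        from astropy.modeling.models import Exponential1D
        decay = Exponential1D(amplitude=5, tau=-2)
        x = np.linspace(0, 10, 50)
        y = decay(x)   # 5 at x = 0, falling by a factor of 1/e every 2 units of x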
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
"""tau cannot be 0."""
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.input_unit is None:
return None
return {self.inputs[0]: self.tau.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"tau": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees but internally the computations are
performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
from itertools import chain, product
import numpy as np
from astropy import units as u
from astropy import wcs
from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian
# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
("ZenithalPerspective", "AZP"),
("SlantZenithalPerspective", "SZP"),
("Gnomonic", "TAN"),
("Stereographic", "STG"),
("SlantOrthographic", "SIN"),
("ZenithalEquidistant", "ARC"),
("ZenithalEqualArea", "ZEA"),
("Airy", "AIR"),
("CylindricalPerspective", "CYP"),
("CylindricalEqualArea", "CEA"),
("PlateCarree", "CAR"),
("Mercator", "MER"),
("SansonFlamsteed", "SFL"),
("Parabolic", "PAR"),
("Molleweide", "MOL"),
("HammerAitoff", "AIT"),
("ConicPerspective", "COP"),
("ConicEqualArea", "COE"),
("ConicEquidistant", "COD"),
("ConicOrthomorphic", "COO"),
("BonneEqualArea", "BON"),
("Polyconic", "PCO"),
("TangentialSphericalCube", "TSC"),
("COBEQuadSphericalCube", "CSC"),
("QuadSphericalCube", "QSC"),
("HEALPix", "HPX"),
("HEALPixPolar", "XPH"),
]
_NOT_SUPPORTED_PROJ_CODES = ["ZPN"]
_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)
projcodes = [code for _, code in _PROJ_NAME_CODE]
__all__ = [
"Projection",
"Pix2SkyProjection",
"Sky2PixProjection",
"Zenithal",
"Cylindrical",
"PseudoCylindrical",
"Conic",
"PseudoConic",
"QuadCube",
"HEALPix",
"AffineTransformation2D",
"projcodes",
] + list(map("_".join, product(["Pix2Sky", "Sky2Pix"], chain(*_PROJ_NAME_CODE))))
class _ParameterDS(Parameter):
"""
Same as `Parameter` but can indicate its modified status via the ``dirty``
property. This flag also gets set automatically when a parameter is
modified.
    This ability to track a parameter's modified status is needed so that
    WCSLIB's prjprm structure, whose update may be a relatively expensive
    operation, is refreshed *only as required*.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dirty = True
def validate(self, value):
super().validate(value)
self.dirty = True
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj = wcs.Prjprm()
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
@property
def prjprm(self):
"""WCSLIB ``prjprm`` structure."""
self._update_prj()
return self._prj
def _update_prj(self):
"""
A default updater for projection's pv.
.. warning::
This method assumes that PV0 is never modified. If a projection
that uses PV0 is ever implemented in this module, that projection
class should override this method.
.. warning::
This method assumes that the order in which PVi values (i>0)
are to be assigned is identical to the order of model parameters
in ``param_names``. That is, pv[1] = model.parameters[0], ...
"""
if not self.param_names:
return
pv = []
dirty = False
for p in self.param_names:
param = getattr(self, p)
pv.append(float(param.value))
dirty |= param.dirty
param.dirty = False
if dirty:
self._prj.pv = None, *pv
self._prj.set()
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# with no parameters:
self._prj.set()
self.inputs = ("x", "y")
self.outputs = ("phi", "theta")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, x, y, *args, **kwargs):
self._update_prj()
return self._prj.prjx2s(x, y)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split("_")[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# without parameters:
self._prj.set()
self.inputs = ("phi", "theta")
self.outputs = ("x", "y")
@property
def input_units(self):
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def evaluate(self, phi, theta, *args, **kwargs):
self._update_prj()
return self._prj.prjs2x(phi, theta)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
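    Examples
    --------
    A minimal usage sketch (the projection-plane inputs are in degrees;
    the chosen values are illustrative)::
        from astropy.modeling.models import Pix2Sky_ZenithalPerspective
        p = Pix2Sky_ZenithalPerspective(mu=2, gamma=30)
        phi, theta = p(1.0, 2.0)       # native longitude and latitude in degrees
        x, y = p.inverse(phi, theta)   # approximately recovers (1.0, 2.0)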
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default = 0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}
{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Look angle γ in degrees (Default=0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees (Default=0°)",
)
theta0 = _ParameterDS(
default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees (Default=0°)",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees",
)
theta0 = _ParameterDS(
        default=90.0,
getter=_to_orig_unit,
setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees",
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1"
)
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
"""
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
eta = _ParameterDS(default=0.0, description="Obliqueness parameter")
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
But more specifically are:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
        y &= -\frac{180^\circ}{\pi}[\cos \theta \cos \phi - \eta(1 - \sin \theta)]
"""
xi = _ParameterDS(default=0.0)
eta = _ParameterDS(default=0.0)
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
    Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
    Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(default=90.0)
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
\frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(
default=90.0,
description="The latitude at which to minimize the error,in degrees",
)
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0)
lam = _ParameterDS(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
    mu : float
        Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(
default=1.0, description="Distance from center of sphere in spherical radii"
)
lam = _ParameterDS(
default=1.0, description="Radius of the cylinder in spherical radii"
)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x)
theta = np.array(y)
return phi, theta
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi)
y = np.array(theta)
return x, y
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
        \phi &= \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(
\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
+ \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}
\right)
"""
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
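# Parameterisation sketch (comment only): the conic models below take sigma and
# delta rather than the standard parallels directly, with
# sigma = (theta_1 + theta_2) / 2 and delta = (theta_1 - theta_2) / 2 as stated
# in their Parameters sections, so hypothetical parallels at 60° and 20° become:
#
#     coe = Pix2Sky_ConicEqualArea(sigma=40.0, delta=20.0)
#     phi, theta = coe(5.0, 35.0)   # pixel-plane degrees -> native (phi, theta)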
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
Alber's conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
Alber's conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Bonne conformal latitude, in degrees",
)
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class HEALPix(Projection):
r"""Base class for HEALPix projections."""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(
default=4.0, description="The number of facets in longitude direction."
)
X = _ParameterDS(
default=3.0, description="The number of facets in latitude direction."
)
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
    HEALPix polar, aka "butterfly" projection - sky to pixel.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given as either a 2x1 or 1x2 array) specifying a
translation to apply to the inputs
"""
n_inputs = 2
n_outputs = 2
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array"
)
@translation.validator
def translation(self, value):
"""
Validates that the translation vector is a 2D vector. This allows
either a "row" vector or a "column" vector where in the latter case the
resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
"""
if not (
(np.ndim(value) == 1 and np.shape(value) == (2,))
or (np.ndim(value) == 2 and np.shape(value) == (1, 2))
):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array"
)
def __init__(self, matrix=matrix, translation=translation, **kwargs):
super().__init__(matrix=matrix, translation=translation, **kwargs)
self.inputs = ("x", "y")
self.outputs = ("x", "y")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
f"Transformation matrix is singular; {self.__class__.__name__} model"
" does not have an inverse"
)
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
        two lists--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
        # Use asarray to ensure we lose any units attached to the inputs.
inarr = np.vstack(
[np.asarray(x).ravel(), np.asarray(y).ravel(), np.ones(x.size, x.dtype)]
)
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
if not all([hasattr(translation, "unit"), hasattr(matrix, "unit")]):
raise ValueError(
"To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities."
)
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
return augmented_matrix
@property
def input_units(self):
translation_unit = self.translation.input_unit
matrix_unit = self.matrix.input_unit
if translation_unit is None and matrix_unit is None:
return None
elif translation_unit is not None:
return dict(zip(self.inputs, [translation_unit] * 2))
else:
return dict(zip(self.inputs, [matrix_unit] * 2))
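# Usage sketch for AffineTransformation2D (comment only, relying just on the
# ``evaluate`` and ``inverse`` defined above): a 90-degree rotation plus a
# translation, applied through the augmented 3x3 matrix and then undone.
#
#     aff = AffineTransformation2D(matrix=[[0.0, -1.0], [1.0, 0.0]],
#                                  translation=[1.0, 2.0])
#     x, y = aff(np.array([1.0]), np.array([0.0]))   # -> (array([1.]), array([3.]))
#     aff.inverse(x, y)                              # -> approximately (1.0, 0.0)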
for long_name, short_name in _PROJ_NAME_CODE:
# define short-name projection equivalent classes:
globals()["Pix2Sky_" + short_name] = globals()["Pix2Sky_" + long_name]
globals()["Sky2Pix_" + short_name] = globals()["Sky2Pix_" + long_name]
# set inverse classes:
globals()["Pix2Sky_" + long_name]._inv_cls = globals()["Sky2Pix_" + long_name]
globals()["Sky2Pix_" + long_name]._inv_cls = globals()["Pix2Sky_" + long_name]
|
e5488c90ddefeecbc72ca86aae400650dfc404ac735b481ef4a122a204b2621d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
# pylint: disable=invalid-name
from math import comb
import numpy as np
from astropy.utils import check_broadcast, indent
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import _validate_domain_window, poly_map_domain
__all__ = [
"Chebyshev1D",
"Chebyshev2D",
"Hermite1D",
"Hermite2D",
"InverseSIP",
"Legendre1D",
"Legendre2D",
"Polynomial1D",
"Polynomial2D",
"SIP",
"OrthoPolynomialBase",
"PolynomialModel",
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(
self, degree, n_models=None, model_set_axis=None, name=None, meta=None, **params
):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set.
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
return numc
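    # Worked example for the count above (comment only): a 2D polynomial of
    # degree 3 needs numc = 3 * 2 + comb(3, 2) + 1 = 10 coefficients, matching
    # the binomial count (3 + 1)(3 + 2) / 2 quoted in Polynomial2D; in 1D the
    # formula reduces to degree + 1.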
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append(f"c{n}")
else:
for i in range(self.degree + 1):
names.append(f"c{i}_{0}")
for i in range(1, self.degree + 1):
names.append(f"c{0}_{i}")
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append(f"c{i}_{j}")
return tuple(names)
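    # Naming sketch (comment only): for a 1D model of degree 2 this yields
    # ("c0", "c1", "c2"); for a 2D model of degree 2 it yields
    # ("c0_0", "c1_0", "c2_0", "c0_1", "c0_2", "c1_1"), i.e. pure x powers
    # first, then pure y powers, then the mixed terms.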
class _PolyDomainWindow1D(PolynomialModel):
"""
This class sets ``domain`` and ``window`` of 1D polynomials.
"""
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree, n_models, model_set_axis, name=name, meta=meta, **params
)
self._set_default_domain_window(domain, window)
@property
def window(self):
return self._window
@window.setter
def window(self, val):
self._window = _validate_domain_window(val)
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, val):
self._domain = _validate_domain_window(val)
def _set_default_domain_window(self, domain, window):
"""
This method sets the ``domain`` and ``window`` attributes on 1D subclasses.
"""
self._default_domain_window = {"domain": None, "window": (-1, 1)}
self.window = window or (-1, 1)
self.domain = domain
def __repr__(self):
return self._format_repr(
[self.degree],
kwargs={"domain": self.domain, "window": self.window},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[("Degree", self.degree), ("Domain", self.domain), ("Window", self.window)],
self._default_domain_window,
)
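    # Mapping sketch (comment only): before evaluation the independent variable
    # is rescaled linearly from ``domain`` onto ``window`` with
    # ``poly_map_domain`` (see the ``evaluate`` methods below), so for a model
    # with domain=(0, 10) and window=(-1, 1):
    #
    #     poly_map_domain(np.array([0.0, 5.0, 10.0]), (0, 10), (-1, 1))
    #     # -> array([-1., 0., 1.])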
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
    For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
x_window : tuple or None, optional
range of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
y_window : tuple or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
n_inputs = 2
n_outputs = 1
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
        # Set the ``x/y_domain`` and ``x/y_window`` attributes in subclasses.
self._default_domain_window = {
"x_window": (-1, 1),
"y_window": (-1, 1),
"x_domain": None,
"y_domain": None,
}
self.x_window = x_window or self._default_domain_window["x_window"]
self.y_window = y_window or self._default_domain_window["y_window"]
self.x_domain = x_domain
self.y_domain = y_domain
self._param_names = self._generate_coeff_names()
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
def __repr__(self):
return self._format_repr(
[self.x_degree, self.y_degree],
kwargs={
"x_domain": self.x_domain,
"y_domain": self.y_domain,
"x_window": self.x_window,
"y_window": self.y_window,
},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[
("X_Degree", self.x_degree),
("Y_Degree", self.y_degree),
("X_Domain", self.x_domain),
("Y_Domain", self.y_domain),
("X_Window", self.x_window),
("Y_Window", self.y_window),
],
self._default_domain_window,
)
def get_num_coeff(self):
"""
Determine how many coefficients are needed.
Returns
-------
numc : int
number of coefficients
"""
if self.x_degree < 0 or self.y_degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = f"c{i}_{j}"
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, "r" + str(n), 0.0)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, "r" + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, "r" + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, "r" + str(i), 0.0)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, "r" + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append(f"c{i}_{j}")
return tuple(names)
def _fcache(self, x, y):
"""
        Compute and store the individual functions.
        To be implemented by subclasses.
"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), broadcasted_shapes
class Chebyshev1D(_PolyDomainWindow1D):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
    For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window.
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain=domain,
window=window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
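    # Evaluation sketch (comment only): the recurrence above matches NumPy's
    # reference Chebyshev evaluator, e.g. (numpy.polynomial is assumed to be
    # available but is not otherwise imported by this module):
    #
    #     from numpy.polynomial import chebyshev as np_cheb
    #     x = np.linspace(-1, 1, 5)
    #     np.allclose(Chebyshev1D.clenshaw(x, (1.0, 2.0, 3.0)),
    #                 np_cheb.chebval(x, (1.0, 2.0, 3.0)))   # -> True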
class Hermite1D(_PolyDomainWindow1D):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = 2 * x
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = 2 * x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = 2 * y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
return kfunc
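    # Layout sketch (comment only): for x_degree=2, y_degree=1 the cache holds
    # {0: H0(x), 1: H1(x), 2: H2(x), 3: H0(y), 4: H1(y)}, i.e. keys
    # 0..x_degree store the x polynomials and the following y_degree + 1 keys
    # store the y polynomials, which is the ordering ``imhorner`` expects.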
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Hermite polynomials:
.. math::
H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _hermderiv1d(self, x, deg):
"""
Derivative of 1D Hermite series.
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x2
for i in range(2, deg + 1):
d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre1D(_PolyDomainWindow1D):
r"""
Univariate Legendre series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_i(x)`` is the corresponding Legendre polynomial.
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def clenshaw(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
nd = nd - 1
c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
return c0 + c1 * x
class Polynomial1D(_PolyDomainWindow1D):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
If None, it is set to (-1, 1)
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain,
window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
# Set domain separately because it's different from
# the orthogonal polynomials.
self._default_domain_window = {
"domain": (-1, 1),
"window": (-1, 1),
}
self.domain = domain or self._default_domain_window["domain"]
self.window = window or self._default_domain_window["window"]
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
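    # Evaluation sketch (comment only): ``horner`` expects coefficients ordered
    # c0, c1, ..., cn (lowest power first), the reverse of ``np.polyval``:
    #
    #     x = np.array([0.0, 1.0, 2.0])
    #     Polynomial1D.horner(x, (1.0, 2.0, 3.0))   # 1 + 2x + 3x**2
    #     # same values as np.polyval([3.0, 2.0, 1.0], x) -> [1., 6., 17.]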
@property
def input_units(self):
if self.degree == 0 or self.c1.input_unit is None:
return None
else:
return {self.inputs[0]: self.c0.input_unit / self.c1.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
par = getattr(self, f"c{i}")
mapping[par.name] = (
outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i
)
return mapping
class Polynomial2D(PolynomialModel):
r"""
2D Polynomial model.
Represents a general polynomial of degree n:
.. math::
P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
+ c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
Polynomial degree: largest sum of exponents (:math:`i + j`) of
variables in each monomial term of the form :math:`x^i y^j`. The
number of terms in a 2D polynomial of degree ``n`` is given by binomial
coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`.
x_domain : tuple or None, optional
domain of the x independent variable
If None, it is set to (-1, 1)
y_domain : tuple or None, optional
domain of the y independent variable
If None, it is set to (-1, 1)
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the x_domain to x_window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the y_domain to y_window
**params : dict
keyword: value pairs, representing parameter_name: value
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(
self,
degree,
x_domain=None,
y_domain=None,
x_window=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
self._default_domain_window = {
"x_domain": (-1, 1),
"y_domain": (-1, 1),
"x_window": (-1, 1),
"y_window": (-1, 1),
}
self.x_domain = x_domain or self._default_domain_window["x_domain"]
self.y_domain = y_domain or self._default_domain_window["y_domain"]
self.x_window = x_window or self._default_domain_window["x_window"]
self.y_window = y_window or self._default_domain_window["y_window"]
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
return (x, y), broadcasted_shapes
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
result = self.multivariate_horner(x, y, invcoeff)
# Special case for degree==0 to ensure that the shape of the output is
# still as expected by the broadcasting rules, even though the x and y
# inputs are not used in the evaluation
if self.degree == 0:
output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
if output_shape:
new_result = np.empty(output_shape)
new_result[:] = result
result = new_result
return result
def __repr__(self):
return self._format_repr(
[self.degree],
kwargs={
"x_domain": self.x_domain,
"y_domain": self.y_domain,
"x_window": self.x_window,
"y_window": self.y_window,
},
defaults=self._default_domain_window,
)
def __str__(self):
return self._format_str(
[
("Degree", self.degree),
("X_Domain", self.x_domain),
("Y_Domain", self.y_domain),
("X_Window", self.x_window),
("Y_Window", self.y_window),
],
self._default_domain_window,
)
def fit_deriv(self, x, y, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.ndim == 2:
x = x.flatten()
if y.ndim == 2:
y = y.flatten()
if x.size != y.size:
raise ValueError("Expected x and y to be of equal size")
designx = x[:, None] ** np.arange(self.degree + 1)
designy = y[:, None] ** np.arange(1, self.degree + 1)
designmixed = []
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j <= self.degree:
designmixed.append((x**i) * (y**j))
designmixed = np.array(designmixed).T
if designmixed.any():
v = np.hstack([designx, designy, designmixed])
else:
v = np.hstack([designx, designy])
return v
def invlex_coeff(self, coeffs):
invlex_coeffs = []
lencoeff = range(self.degree + 1)
for i in lencoeff:
for j in lencoeff:
if i + j <= self.degree:
name = f"c{j}_{i}"
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
Multivariate Horner's scheme.
Parameters
----------
x, y : array
coeffs : array
Coefficients in inverse lexical order.
"""
alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
karr = np.diff(alpha, axis=0)
for n in range(len(karr)):
if karr[n, 1] != 0:
r2 = y * (r0 + r1 + r2)
r1 = np.zeros_like(coeffs[0], subok=False)
else:
r1 = x * (r0 + r1)
r0 = coeffs[n + 1]
return r0 + r1 + r2
@property
def input_units(self):
if self.degree == 0 or (
self.c1_0.input_unit is None and self.c0_1.input_unit is None
):
return None
return {
self.inputs[0]: self.c0_0.input_unit / self.c1_0.input_unit,
self.inputs[1]: self.c0_0.input_unit / self.c0_1.input_unit,
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
for j in range(self.degree + 1):
if i + j > 2:
continue
par = getattr(self, f"c{i}_{j}")
mapping[par.name] = (
outputs_unit[self.outputs[0]]
/ inputs_unit[self.inputs[0]] ** i
/ inputs_unit[self.inputs[1]] ** j
)
return mapping
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
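# Illustrative sketch (not part of astropy): ``evaluate`` above relies on
# ``poly_map_domain`` (defined elsewhere in this module) to remap inputs from
# ``domain`` onto ``window`` before the orthogonal polynomials are evaluated.
# A minimal stand-alone equivalent of that kind of affine remapping, under the
# assumption that both domain and window are (low, high) tuples, is:
def _example_map_domain_to_window(x, domain=(0.0, 10.0), window=(-1.0, 1.0)):
    """Linearly map ``x`` from ``domain`` onto ``window`` (illustrative only)."""
    d_lo, d_hi = domain
    w_lo, w_hi = window
    scale = (w_hi - w_lo) / (d_hi - d_lo)
    return w_lo + (np.asarray(x) - d_lo) * scale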
class Chebyshev2D(OrthoPolynomialBase):
r"""
Bivariate Chebyshev series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x) T_m(y)
where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
the coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
specified with units, 2x^2 and -1 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Chebyshev functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Chebyshev polynomials:
.. math::
T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _chebderiv1d(self, x, deg):
"""
Derivative of 1D Chebyshev series.
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x
for i in range(2, deg + 1):
d[i] = d[i - 1] * x2 - d[i - 2]
return np.rollaxis(d, 0, d.ndim)
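# Illustrative sketch (not part of astropy): the ``_fcache`` recurrence above,
# T_n(x) = 2 x T_{n-1}(x) - T_{n-2}(x), reproduces NumPy's Chebyshev basis of
# the first kind.  A quick, self-contained check against
# ``numpy.polynomial.chebyshev.chebval``:
def _example_chebyshev_recurrence_check(degree=5):
    from numpy.polynomial import chebyshev as _cheb

    x = np.linspace(-1.0, 1.0, 11)
    t = [np.ones_like(x), x.copy()]
    for n in range(2, degree + 1):
        t.append(2.0 * x * t[n - 1] - t[n - 2])
    # Each T_n from the recurrence should match a Chebyshev series with a
    # single unit coefficient in slot n.
    return all(
        np.allclose(t[n], _cheb.chebval(x, [0.0] * n + [1.0]))
        for n in range(degree + 1)
    )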
class Legendre2D(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
Model formula:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_{i}`` is the corresponding Legendre polynomial.
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def _fcache(self, x, y):
"""
Calculate the individual Legendre functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = (
(2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2]
) / n
for n in range(2, y_terms):
kfunc[n + x_terms] = (
(2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1]
- (n - 1) * kfunc[n + x_terms - 2]
) / (n)
return kfunc
def fit_deriv(self, x, y, *params):
"""Derivatives with respect to the coefficients.
This is an array with Legendre polynomials:
Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _legendderiv1d(self, x, deg):
"""Derivative of 1D Legendre polynomial."""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
d[1] = x
for i in range(2, deg + 1):
d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
return np.rollaxis(d, 0, d.ndim)
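# Illustrative sketch (not part of astropy): the ``_fcache`` recurrence above,
# n P_n(x) = (2n - 1) x P_{n-1}(x) - (n - 1) P_{n-2}(x), reproduces NumPy's
# Legendre basis.  A quick check against ``numpy.polynomial.legendre.legval``:
def _example_legendre_recurrence_check(degree=5):
    from numpy.polynomial import legendre as _leg

    x = np.linspace(-1.0, 1.0, 11)
    p = [np.ones_like(x), x.copy()]
    for n in range(2, degree + 1):
        p.append(((2.0 * n - 1.0) * x * p[n - 1] - (n - 1.0) * p[n - 2]) / n)
    return all(
        np.allclose(p[n], _leg.legval(x, [0.0] * n + [1.0]))
        for n in range(degree + 1)
    )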
class _SIP1D(PolynomialBase):
"""
This implements the Simple Imaging Polynomial Model (SIP) in 1D.
It is unlikely to be useful in 1D on its own, so this class is private
and SIP should be used instead.
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(
self,
order,
coeff_prefix,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
self.order = order
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(
param_name, default=np.zeros(minshape)
)
super().__init__(
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def __repr__(self):
return self._format_repr(args=[self.order, self.coeff_prefix])
def __str__(self):
return self._format_str(
[("Order", self.order), ("Coeff. Prefix", self.coeff_prefix)]
)
def evaluate(self, x, y, *coeffs):
# TODO: Rewrite this so that it uses a simpler method of determining
# the matrix based on the number of given coefficients.
mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
return self._eval_sip(x, y, mcoef)
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one param set.
"""
if self.order < 2 or self.order > 9:
raise ValueError("Degree of polynomial must be 2< deg < 9")
nmixed = comb(self.order, ndim)
# remove 3 terms because SIP deg >= 2
numc = self.order * ndim + nmixed - 2
return numc
def _generate_coeff_names(self, coeff_prefix):
names = []
for i in range(2, self.order + 1):
names.append(f"{coeff_prefix}_{i}_{0}")
for i in range(2, self.order + 1):
names.append(f"{coeff_prefix}_{0}_{i}")
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
names.append(f"{coeff_prefix}_{i}_{j}")
return tuple(names)
def _coeff_matrix(self, coeff_prefix, coeffs):
mat = np.zeros((self.order + 1, self.order + 1))
for i in range(2, self.order + 1):
attr = f"{coeff_prefix}_{i}_{0}"
mat[i, 0] = coeffs[self.param_names.index(attr)]
for i in range(2, self.order + 1):
attr = f"{coeff_prefix}_{0}_{i}"
mat[0, i] = coeffs[self.param_names.index(attr)]
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
attr = f"{coeff_prefix}_{i}_{j}"
mat[i, j] = coeffs[self.param_names.index(attr)]
return mat
def _eval_sip(self, x, y, coef):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if self.coeff_prefix == "A":
result = np.zeros(x.shape)
else:
result = np.zeros(y.shape)
for i in range(coef.shape[0]):
for j in range(coef.shape[1]):
if 1 < i + j < self.order + 1:
result = result + coef[i, j] * x**i * y**j
return result
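# Illustrative sketch (not part of astropy): for a given polynomial order the
# SIP coefficient names generated in ``_generate_coeff_names`` above cover the
# x**i * y**j terms with 2 <= i + j <= order (constant and purely linear terms
# are excluded).  A small helper reproducing that enumeration for inspection:
def _example_sip_term_names(order=3, coeff_prefix="A"):
    names = []
    for i in range(2, order + 1):
        names.append(f"{coeff_prefix}_{i}_0")
    for i in range(2, order + 1):
        names.append(f"{coeff_prefix}_0_{i}")
    for i in range(1, order):
        for j in range(1, order):
            if i + j <= order:
                names.append(f"{coeff_prefix}_{i}_{j}")
    return names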
class SIP(Model):
"""
Simple Imaging Polynomial (SIP) model.
The SIP convention is used to represent distortions in FITS image headers.
See [1]_ for a description of the SIP convention.
Parameters
----------
crpix : list or (2,) ndarray
CRPIX values
a_order : int
SIP polynomial order for first axis
b_order : int
SIP polynomial order for second axis
a_coeff : dict
SIP coefficients for first axis
b_coeff : dict
SIP coefficients for the second axis
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
References
----------
.. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005
<https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(
self,
crpix,
a_order,
b_order,
a_coeff={},
b_coeff={},
ap_order=None,
bp_order=None,
ap_coeff={},
bp_coeff={},
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
self._crpix = crpix
self._a_order = a_order
self._b_order = b_order
self._a_coeff = a_coeff
self._b_coeff = b_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
self.shift_a = Shift(-crpix[0])
self.shift_b = Shift(-crpix[1])
self.sip1d_a = _SIP1D(
a_order,
coeff_prefix="A",
n_models=n_models,
model_set_axis=model_set_axis,
**a_coeff,
)
self.sip1d_b = _SIP1D(
b_order,
coeff_prefix="B",
n_models=n_models,
model_set_axis=model_set_axis,
**b_coeff,
)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
self._inputs = ("u", "v")
self._outputs = ("x", "y")
def __repr__(self):
return (
f"<{self.__class__.__name__}"
f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>"
)
def __str__(self):
parts = [f"Model: {self.__class__.__name__}"]
for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
parts.append(indent(str(model), width=4))
parts.append("")
return "\n".join(parts)
@property
def inverse(self):
if self._ap_order is not None and self._bp_order is not None:
return InverseSIP(
self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff
)
else:
raise NotImplementedError("SIP inverse coefficients are not available.")
def evaluate(self, x, y):
u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
return f, g
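# Illustrative sketch (not part of astropy): constructing a low-order SIP
# distortion model and evaluating the A/B distortion polynomials at a pixel
# position.  The CRPIX and coefficient values below are made up purely for
# demonstration; the call returns the distortion terms f(u, v) and g(u, v)
# computed from the CRPIX-shifted coordinates.
def _example_sip_usage():
    sip = SIP(
        crpix=[512.0, 512.0],
        a_order=2,
        b_order=2,
        a_coeff={"A_2_0": 1e-6, "A_0_2": 2e-6, "A_1_1": -1e-6},
        b_coeff={"B_2_0": -2e-6, "B_0_2": 1e-6, "B_1_1": 3e-6},
    )
    return sip(100.0, 200.0)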
class InverseSIP(Model):
"""
Inverse Simple Imaging Polynomial.
Parameters
----------
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(
self,
ap_order,
bp_order,
ap_coeff={},
bp_coeff={},
n_models=None,
model_set_axis=None,
name=None,
meta=None,
):
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
# define the 0th term in order to use Polynomial2D
ap_coeff.setdefault("AP_0_0", 0)
bp_coeff.setdefault("BP_0_0", 0)
ap_coeff_params = {k.replace("AP_", "c"): v for k, v in ap_coeff.items()}
bp_coeff_params = {k.replace("BP_", "c"): v for k, v in bp_coeff.items()}
self.sip1d_ap = Polynomial2D(
degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params
)
self.sip1d_bp = Polynomial2D(
degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params
)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
)
def __repr__(self):
return f"<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>"
def __str__(self):
parts = [f"Model: {self.__class__.__name__}"]
for model in [self.sip1d_ap, self.sip1d_bp]:
parts.append(indent(str(model), width=4))
parts.append("")
return "\n".join(parts)
def evaluate(self, x, y):
x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
return x1, y1
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
# pylint: disable=invalid-name, no-member
import warnings
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"]
class BlackBody(Fittable1DModel):
"""
Blackbody model using the Planck function.
Parameters
----------
temperature : `~astropy.units.Quantity` ['temperature']
Blackbody temperature.
scale : float or `~astropy.units.Quantity` ['dimensionless']
Scale factor. If dimensionless, input units will be assumed
to be in Hz and output units in erg / (cm ** 2 * s * Hz * sr).
If not dimensionless, must be equivalent to either
erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr),
in which case the result will be returned in the requested units and
the scale will be stripped of units (with the float value applied).
Notes
-----
Model formula:
.. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{\\exp(h \\nu / k T) - 1}
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody(temperature=5000*u.K)
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.53254685e-05 erg / (Hz s sr cm2)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav)
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a scale.
temperature = Parameter(
default=5000.0, min=0, unit=u.K, description="Blackbody temperature"
)
scale = Parameter(default=1.0, min=0, description="Scale factor")
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz or wavelengths
# in AA (depending on the choice of output units controlled by units on scale
# and stored in self._output_units during init).
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {"x": u.spectral()}
# Store the native units returned by B_nu equation
_native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# Store the base native output units. If scale is not dimensionless, it
# must be equivalent to one of these. If equivalent to SLAM, then
# input_units will expect AA for 'x', otherwise Hz.
_native_output_units = {
"SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr),
"SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr),
}
def __init__(self, *args, **kwargs):
scale = kwargs.get("scale", None)
# Support scale with non-dimensionless unit by stripping the unit and
# storing as self._output_units.
if hasattr(scale, "unit") and not scale.unit.is_equivalent(
u.dimensionless_unscaled
):
output_units = scale.unit
if not output_units.is_equivalent(
self._native_units, u.spectral_density(1 * u.AA)
):
raise ValueError(
"scale units not dimensionless or in "
f"surface brightness: {output_units}"
)
kwargs["scale"] = scale.value
self._output_units = output_units
else:
self._output_units = self._native_units
return super().__init__(*args, **kwargs)
def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
@property
def input_units(self):
# The input units are those of the 'x' value, which will depend on the
# units compatible with the expected output units.
if self._output_units.is_equivalent(self._native_output_units["SNU"]):
return {self.inputs[0]: u.Hz}
else:
# only other option is equivalent with SLAM
return {self.inputs[0]: u.AA}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"temperature": u.K}
@property
def bolometric_flux(self):
"""Bolometric flux."""
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
scale = self.scale.quantity.to(u.dimensionless_unscaled)
else:
scale = self.scale.value
# bolometric flux in the native units of the planck function
native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi
# return in more "astro" units
return native_bolflux.to(u.erg / (u.cm**2 * u.s))
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
@property
def nu_max(self):
"""Peak frequency when the curve is expressed as power density."""
return 2.8214391 * const.k_B * self.temperature / const.h
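# Illustrative sketch (not part of astropy): when ``scale`` carries a surface
# brightness unit equivalent to erg / (cm**2 s AA sr), the unit is stripped at
# construction time (see ``__init__`` above), the model then expects
# wavelength input in AA, and evaluation returns values in that surface
# brightness unit.  The numbers below are arbitrary.
def _example_blackbody_output_units():
    bb = BlackBody(
        temperature=6000 * u.K,
        scale=1.0 * u.erg / (u.cm**2 * u.s * u.AA * u.sr),
    )
    # input_units maps the spectral axis to Angstroms; the returned value is a
    # surface brightness per unit wavelength.
    return bb.input_units, bb(5500 * u.AA)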
class Drude1D(Fittable1DModel):
"""
Drude model based on the behavior of electrons in materials (especially metals).
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
Model formula:
.. math:: f(x) = A \\frac{(fwhm/x_0)^2}{(x/x_0 - x_0/x)^2 + (fwhm/x_0)^2}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Drude1D
fig, ax = plt.subplots()
# generate the curves and plot them
x = np.arange(7.5 , 12.5 , 0.1)
dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
ax.plot(x, dmodel(x))
ax.set_xlabel('x')
ax.set_ylabel('F(x)')
plt.show()
"""
amplitude = Parameter(default=1.0, description="Peak Value")
x_0 = Parameter(default=1.0, description="Position of the peak")
fwhm = Parameter(default=1.0, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""
One dimensional Drude model function.
"""
return (
amplitude
* ((fwhm / x_0) ** 2)
/ ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
)
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""
Drude1D model function derivatives.
"""
d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
d_x_0 = (
-2
* amplitude
* d_amplitude
* (
(1 / x_0)
+ d_amplitude
* (x_0**2 / fwhm**2)
* (
(-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
- (2 * fwhm**2 / x_0**3)
)
)
)
d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"fwhm": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
@x_0.validator
def x_0(self, val):
"""Ensure `x_0` is not 0."""
if np.any(val == 0):
raise InputParameterError("0 is not an allowed value for x_0")
def bounding_box(self, factor=50):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
class Plummer1D(Fittable1DModel):
r"""One dimensional Plummer density profile model.
Parameters
----------
mass : float
Total mass of cluster.
r_plum : float
Scale parameter which sets the size of the cluster core.
Notes
-----
Model formula:
.. math::
\rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2}
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P
"""
mass = Parameter(default=1.0, description="Total mass of cluster")
r_plum = Parameter(
default=1.0,
description="Scale parameter which sets the size of the cluster core",
)
@staticmethod
def evaluate(x, mass, r_plum):
"""
Evaluate the Plummer density profile model.
"""
return (
(3 * mass) / (4 * np.pi * r_plum**3) * (1 + (x / r_plum) ** 2) ** (-5 / 2)
)
@staticmethod
def fit_deriv(x, mass, r_plum):
"""
Plummer1D model derivatives.
"""
d_mass = 3 / ((4 * np.pi * r_plum**3) * (((x / r_plum) ** 2 + 1) ** (5 / 2)))
d_r_plum = (6 * mass * x**2 - 9 * mass * r_plum**2) / (
(4 * np.pi * r_plum**6) * (1 + (x / r_plum) ** 2) ** (7 / 2)
)
return [d_mass, d_r_plum]
@property
def input_units(self):
mass_unit = self.mass.input_unit
r_plum_unit = self.r_plum.input_unit
if mass_unit is None and r_plum_unit is None:
return None
return {self.inputs[0]: r_plum_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"mass": outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3,
"r_plum": inputs_unit[self.inputs[0]],
}
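# Illustrative sketch (not part of astropy): integrating the Plummer density
# over all space, i.e. 4 * pi * r**2 * rho(r) integrated over r, should
# recover the total mass.  A crude numerical check with unitless values:
def _example_plummer_mass_check(mass=5.0, r_plum=2.0):
    r = np.linspace(1e-4, 200.0 * r_plum, 20001)
    rho = Plummer1D.evaluate(r, mass, r_plum)
    integrand = 4.0 * np.pi * r**2 * rho
    # Simple trapezoidal sum; the profile converges quickly, so truncating at
    # 200 scale radii loses only a few parts in 1e5 of the mass.
    integral = np.sum(0.5 * (integrand[1:] + integrand[:-1]) * np.diff(r))
    return np.isclose(integral, mass, rtol=1e-3)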
class NFW(Fittable1DModel):
r"""
Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter.
Parameters
----------
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
massfactor : tuple or str
Mass overdensity factor and type for provided profiles:
Tuple version:
("virial",) : virial radius
("critical", N) : radius where density is N times that of the critical density
("mean", N) : radius where density is N times that of the mean density
String version:
"virial" : virial radius
"Nc" : radius where density is N times that of the critical density (e.g. "200c")
"Nm" : radius where density is N times that of the mean density (e.g. "500m")
cosmo : :class:`~astropy.cosmology.Cosmology`
Background cosmology for density calculation. If None, the default cosmology will be used.
Notes
-----
Model formula:
.. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2}
References
----------
.. [1] https://arxiv.org/pdf/astro-ph/9508025
.. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile
.. [3] https://en.wikipedia.org/wiki/Virial_mass
"""
# Model Parameters
# NFW Profile mass
mass = Parameter(
default=1.0,
min=1.0,
unit=u.M_sun,
description="Peak mass within specified overdensity radius",
)
# NFW profile concentration
concentration = Parameter(default=1.0, min=1.0, description="Concentration")
# NFW Profile redshift
redshift = Parameter(default=0.0, min=0.0, description="Redshift")
# We allow values without units to be passed when evaluating the model, and
# in this case the input r values are assumed to be lengths / positions in kpc.
_input_units_allow_dimensionless = True
def __init__(
self,
mass=u.Quantity(mass.default, mass.unit),
concentration=concentration.default,
redshift=redshift.default,
massfactor=("critical", 200),
cosmo=None,
**kwargs,
):
# Set default cosmology
if cosmo is None:
# LOCAL
from astropy.cosmology import default_cosmology
cosmo = default_cosmology.get()
# Set mass overdensity type and factor
self._density_delta(massfactor, cosmo, redshift)
# Establish mass units for density calculation (default solar masses)
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Obtain scale radius
self._radius_s(mass, concentration)
# Obtain scale density
self._density_s(mass, concentration)
super().__init__(
mass=in_mass, concentration=concentration, redshift=redshift, **kwargs
)
def evaluate(self, r, mass, concentration, redshift):
"""
One dimensional NFW profile function.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of density to be calculated for the NFW profile.
mass : float or `~astropy.units.Quantity` ['mass']
Mass of NFW peak within specified overdensity radius.
concentration : float
Concentration of the NFW profile.
redshift : float
Redshift of the NFW profile.
Returns
-------
density : float or `~astropy.units.Quantity` ['density']
NFW profile mass density at location ``r``. The density units are:
[``mass`` / ``r`` ^3]
Notes
-----
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Create radial version of input with dimension
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Define reduced radius (r / r_{\\rm s})
# also update scale radius
radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit)
# Density distribution
# \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2}
# also update scale density
density = self._density_s(mass, concentration) / (
radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2
)
if hasattr(mass, "unit"):
return density
else:
return density.value
def _density_delta(self, massfactor, cosmo, redshift):
"""
Calculate density delta.
"""
# Set mass overdensity type and factor
if isinstance(massfactor, tuple):
# Tuple options
# ("virial") : virial radius
# ("critical", N) : radius where density is N that of the critical density
# ("mean", N) : radius where density is N that of the mean density
if massfactor[0].lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor[0].lower()
elif massfactor[0].lower() == "critical":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "c"
elif massfactor[0].lower() == "mean":
# Critical or Mean Overdensity Mass
delta = float(massfactor[1])
masstype = "m"
else:
raise ValueError(
f"Massfactor '{massfactor[0]}' not one of 'critical', "
"'mean', or 'virial'"
)
else:
try:
# String options
# virial : virial radius
# Nc : radius where density is N that of the critical density
# Nm : radius where density is N that of the mean density
if massfactor.lower() == "virial":
# Virial Mass
delta = None
masstype = massfactor.lower()
elif massfactor[-1].lower() == "c" or massfactor[-1].lower() == "m":
# Critical or Mean Overdensity Mass
delta = float(massfactor[0:-1])
masstype = massfactor[-1].lower()
else:
raise ValueError(
f"Massfactor {massfactor} string not of the form "
"'#m', '#c', or 'virial'"
)
except (AttributeError, TypeError):
raise TypeError(f"Massfactor {massfactor} not a tuple or string")
# Set density from masstype specification
if masstype == "virial":
Om_c = cosmo.Om(redshift) - 1.0
d_c = 18.0 * np.pi**2 + 82.0 * Om_c - 39.0 * Om_c**2
self.density_delta = d_c * cosmo.critical_density(redshift)
elif masstype == "c":
self.density_delta = delta * cosmo.critical_density(redshift)
elif masstype == "m":
self.density_delta = (
delta * cosmo.critical_density(redshift) * cosmo.Om(redshift)
)
return self.density_delta
@staticmethod
def A_NFW(y):
r"""
Dimensionless volume integral of the NFW profile, used as an intermediate step in some
calculations for this model.
Notes
-----
Model formula:
.. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}]
"""
return np.log(1.0 + y) - (y / (1.0 + y))
def _density_s(self, mass, concentration):
"""
Calculate scale density of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Calculate scale density
# M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right].
self.density_s = in_mass / (
4.0
* np.pi
* self._radius_s(in_mass, concentration) ** 3
* self.A_NFW(concentration)
)
return self.density_s
@property
def rho_scale(self):
r"""
Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`.
"""
return self.density_s
def _radius_s(self, mass, concentration):
"""
Calculate scale radius of the NFW profile.
"""
# Enforce default units
if not isinstance(mass, u.Quantity):
in_mass = u.Quantity(mass, u.M_sun)
else:
in_mass = mass
# Delta Mass is related to delta radius by
# M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c}
# And delta radius is related to the NFW scale radius by
# c = R / r_{\\rm s}
self.radius_s = (
((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (1.0 / 3.0)
) / concentration
# Set radial units to kiloparsec by default (unit will be rescaled by units of radius
# in evaluate)
return self.radius_s.to(u.kpc)
@property
def r_s(self):
"""
Scale radius of the NFW profile.
"""
return self.radius_s
@property
def r_virial(self):
"""
Overdensity radius of the NFW profile, as defined by the mass factor (R200c for M200c, Rvir for Mvir, etc.).
"""
return self.r_s * self.concentration
@property
def r_max(self):
"""
Radius of maximum circular velocity.
"""
return self.r_s * 2.16258
@property
def v_max(self):
"""
Maximum circular velocity.
"""
return self.circular_velocity(self.r_max)
def circular_velocity(self, r):
r"""
Circular velocities of the NFW profile.
Parameters
----------
r : float or `~astropy.units.Quantity` ['length']
Radial position of velocity to be calculated for the NFW profile.
Returns
-------
velocity : float or `~astropy.units.Quantity` ['speed']
NFW profile circular velocity at location ``r``. The velocity units are:
[km / s]
Notes
-----
Model formula:
.. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
.. math:: x = r/r_s
.. warning::
Output values might contain ``nan`` and ``inf``.
"""
# Enforce default units (if parameters are without units)
if hasattr(r, "unit"):
in_r = r
else:
in_r = u.Quantity(r, u.kpc)
# Mass factor defined velocity (i.e. V200c for M200c, Vvir for Mvir)
v_profile = np.sqrt(
self.mass
* const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2))
/ self.r_virial
)
# Define reduced radius (r / r_{\\rm s})
reduced_radius = in_r / self.r_virial.to(in_r.unit)
# Circular velocity given by:
# v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)}
# where x=r/r_{200}
velocity = np.sqrt(
(v_profile**2 * self.A_NFW(self.concentration * reduced_radius))
/ (reduced_radius * self.A_NFW(self.concentration))
)
return velocity.to(u.km / u.s)
@property
def input_units(self):
# The units for the 'r' variable should be a length (default kpc)
return {self.inputs[0]: u.kpc}
@property
def return_units(self):
# The units for the 'density' variable should be a matter density (default M_sun / kpc^3)
if self.mass.unit is None:
return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3}
else:
return {
self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3
}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"mass": u.M_sun, "concentration": None, "redshift": None}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants.
"""
# pylint: disable=invalid-name
import numpy as np
from astropy.units import Magnitude, Quantity, UnitsError, dimensionless_unscaled, mag
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = [
"PowerLaw1D",
"BrokenPowerLaw1D",
"SmoothlyBrokenPowerLaw1D",
"ExponentialCutoffPowerLaw1D",
"LogParabola1D",
"Schechter1D",
]
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
"""
amplitude = Parameter(default=1, description="Peak value at the reference point")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function."""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters."""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
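# Illustrative sketch (not part of astropy): the analytic ``fit_deriv`` above
# can be sanity-checked against a centered finite difference of ``evaluate``
# with respect to ``alpha`` (purely numerical, no units involved):
def _example_powerlaw_alpha_derivative_check(eps=1e-6):
    x = np.linspace(0.5, 5.0, 21)
    amplitude, x_0, alpha = 2.0, 1.5, 1.3
    analytic = PowerLaw1D.fit_deriv(x, amplitude, x_0, alpha)[2]
    numeric = (
        PowerLaw1D.evaluate(x, amplitude, x_0, alpha + eps)
        - PowerLaw1D.evaluate(x, amplitude, x_0, alpha - eps)
    ) / (2.0 * eps)
    return np.allclose(analytic, numeric, rtol=1e-5)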
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Peak value at break point")
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=1, description="Power law index before break point")
alpha_2 = Parameter(default=1, description="Power law index after break point")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.input_unit is None:
return None
return {self.inputs[0]: self.x_break.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(
default=1, min=0, description="Peak value at break point", mag=True
)
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=-2, description="Power law index before break point")
alpha_2 = Parameter(default=2, description="Power law index after break point")
delta = Parameter(default=1, min=1.0e-3, description="Smoothness Parameter")
@amplitude.validator
def amplitude(self, value):
if np.any(value <= 0):
raise InputParameterError("amplitude parameter must be > 0")
@delta.validator
def delta(self, value):
if np.any(value < 0.001):
raise InputParameterError("delta parameter must be >= 0.001")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function."""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = (
amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
i = logt < -threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = (
amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
i = np.abs(logt) <= threshold
if i.max():
# In this case the `t` value is "comparable" to 1, hence we
# will evaluate the whole formula.
t = np.exp(logt[i])
r = (1.0 + t) / 2.0
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False, subok=True)
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters.
"""
# Pre-calculate `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
if i.max():
f[i] = (
amplitude * xx[i] ** (-alpha_2) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
if i.max():
f[i] = (
amplitude * xx[i] ** (-alpha_1) / (2.0 ** ((alpha_1 - alpha_2) * delta))
)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
if i.max():
t = np.exp(logt[i])
r = (1.0 + t) / 2.0
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = (
f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2.0 / r) / x_break
)
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = (
f[i]
* (alpha_1 - alpha_2)
* (np.log(r) - t / (1.0 + t) / delta * np.log(xx[i]))
)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.input_unit is None:
return None
return {self.inputs[0]: self.x_break.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
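# Illustrative sketch (not part of astropy): well below the break the smoothly
# broken power law reduces to a plain power law with index ``alpha_1`` (up to
# the constant 2**((alpha_1 - alpha_2) * delta) factor handled in ``evaluate``
# above).  A quick numerical comparison far from the break:
def _example_sbpl_asymptote_check():
    amplitude, x_break, alpha_1, alpha_2, delta = 1.0, 20.0, -2.0, 2.0, 0.1
    x = np.array([1e-4, 1e-3]) * x_break  # x << x_break
    full = SmoothlyBrokenPowerLaw1D.evaluate(
        x, amplitude, x_break, alpha_1, alpha_2, delta
    )
    asymptote = (
        amplitude
        * (x / x_break) ** (-alpha_1)
        / 2.0 ** ((alpha_1 - alpha_2) * delta)
    )
    return np.allclose(full, asymptote)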
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
x_cutoff = Parameter(default=1, description="Cutoff point")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function."""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""
One dimensional exponential cutoff power law derivative with respect to parameters.
"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff**2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"x_cutoff": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and
:math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(
\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}}
\\right )}}
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
beta = Parameter(default=0, description="Power law curvature")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function."""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx**exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters."""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx**exponent
d_beta = -amplitude * d_amplitude * log_xx**2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
class Schechter1D(Fittable1DModel):
r"""
Schechter luminosity function (`Schechter 1976
<https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_),
parameterized in terms of magnitudes.
Parameters
----------
phi_star : float
The normalization factor in units of number density.
m_star : float
The characteristic magnitude where the power-law form of the
function cuts off.
alpha : float
The power law index, also known as the faint-end slope. Must not
have units.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`\phi^{*}` for ``phi_star``, :math:`M^{*}`
for ``m_star``, and :math:`\alpha` for ``alpha``):
.. math::
n(M) \ dM = (0.4 \ln 10) \ \phi^{*} \
[{10^{0.4 (M^{*} - M)}}]^{\alpha + 1} \
\exp{[-10^{0.4 (M^{*} - M)}]} \ dM
``phi_star`` is the normalization factor in units of number density.
``m_star`` is the characteristic magnitude where the power-law form
of the function cuts off into the exponential form. ``alpha`` is
the power-law index, defining the faint-end slope of the luminosity
function.
Examples
--------
.. plot::
:include-source:
from astropy.modeling.models import Schechter1D
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
phi_star = 4.3e-4 * (u.Mpc ** -3)
m_star = -20.26
alpha = -1.98
model = Schechter1D(phi_star, m_star, alpha)
mag = np.linspace(-25, -17)
fig, ax = plt.subplots()
ax.plot(mag, model(mag))
ax.set_yscale('log')
ax.set_xlim(-22.6, -17)
ax.set_ylim(1.e-7, 1.e-2)
ax.set_xlabel('$M_{UV}$')
ax.set_ylabel(r'$\phi$ [mag$^{-1}$ Mpc$^{-3}$]')
References
----------
.. [1] Schechter 1976; ApJ 203, 297
(https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract)
.. [2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_
"""
phi_star = Parameter(
default=1.0, description="Normalization factor in units of number density"
)
m_star = Parameter(default=-20.0, description="Characteristic magnitude", mag=True)
alpha = Parameter(default=-1.0, description="Faint-end slope")
@staticmethod
def _factor(magnitude, m_star):
factor_exp = magnitude - m_star
if isinstance(factor_exp, Quantity):
if factor_exp.unit == mag:
factor_exp = Magnitude(factor_exp.value, unit=mag)
return factor_exp.to(dimensionless_unscaled)
else:
raise UnitsError(
"The units of magnitude and m_star must be a magnitude"
)
else:
return 10 ** (-0.4 * factor_exp)
def evaluate(self, mag, phi_star, m_star, alpha):
"""Schechter luminosity function model function."""
factor = self._factor(mag, m_star)
return 0.4 * np.log(10) * phi_star * factor ** (alpha + 1) * np.exp(-factor)
def fit_deriv(self, mag, phi_star, m_star, alpha):
"""
Schechter luminosity function derivative with respect to
parameters.
"""
factor = self._factor(mag, m_star)
d_phi_star = 0.4 * np.log(10) * factor ** (alpha + 1) * np.exp(-factor)
func = phi_star * d_phi_star
d_m_star = (alpha + 1) * 0.4 * np.log(10) * func - (
0.4 * np.log(10) * func * factor
)
d_alpha = func * np.log(factor)
return [d_phi_star, d_m_star, d_alpha]
@property
def input_units(self):
if self.m_star.input_unit is None:
return None
return {self.inputs[0]: self.m_star.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"m_star": inputs_unit[self.inputs[0]],
"phi_star": outputs_unit[self.outputs[0]],
}
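# Illustrative sketch (not part of astropy): evaluating the Schechter function
# at a single magnitude and comparing it with the formula written out in the
# Notes section above (dimensionless numbers only, no magnitude units):
def _example_schechter_formula_check():
    phi_star, m_star, alpha, m = 4.3e-4, -20.26, -1.98, -21.0
    model_value = Schechter1D().evaluate(m, phi_star, m_star, alpha)
    x = 10.0 ** (0.4 * (m_star - m))
    direct = 0.4 * np.log(10.0) * phi_star * x ** (alpha + 1) * np.exp(-x)
    return np.isclose(model_value, direct)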
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import queue
import select
import socket
import threading
import time
import uuid
import warnings
import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse
from astropy import log
from .constants import SAMP_STATUS_OK, __profile_version__
from .errors import SAMPHubError, SAMPProxyError, SAMPWarning
from .lockfile_helpers import create_lock_file, read_lockfile
from .standard_profile import ThreadingXMLRPCServer
from .utils import ServerProxyPool, _HubAsClient, internet_on
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ["SAMPHubServer", "WebProfileDialog"]
__doctest_skip__ = [".", "SAMPHubServer.*"]
class SAMPHubServer:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
Client inactivity timeout. If ``client_timeout > 0`` then the Hub
automatically unregisters the clients which result inactive for a
period longer than ``client_timeout`` seconds. By default
``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
runs using the standard ``.samp`` lock-file, having a single instance
for user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
then the Hub runs using a non-standard lock-file, placed in
``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where
``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
A string used to label the Hub with a human readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
"""
def __init__(
self,
secret=None,
addr=None,
port=0,
lockfile=None,
timeout=0,
client_timeout=0,
mode="single",
label="",
web_profile=True,
web_profile_dialog=None,
web_port=21012,
pool_size=20,
):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except OSError:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, "samp.hub.ping")
server.register_function(
self._set_xmlrpc_callback, "samp.hub.setXmlrpcCallback"
)
# Standard API operations
server.register_function(self._register, "samp.hub.register")
server.register_function(self._unregister, "samp.hub.unregister")
server.register_function(self._declare_metadata, "samp.hub.declareMetadata")
server.register_function(self._get_metadata, "samp.hub.getMetadata")
server.register_function(
self._declare_subscriptions, "samp.hub.declareSubscriptions"
)
server.register_function(self._get_subscriptions, "samp.hub.getSubscriptions")
server.register_function(
self._get_registered_clients, "samp.hub.getRegisteredClients"
)
server.register_function(
self._get_subscribed_clients, "samp.hub.getSubscribedClients"
)
server.register_function(self._notify, "samp.hub.notify")
server.register_function(self._notify_all, "samp.hub.notifyAll")
server.register_function(self._call, "samp.hub.call")
server.register_function(self._call_all, "samp.hub.callAll")
server.register_function(self._call_and_wait, "samp.hub.callAndWait")
server.register_function(self._reply, "samp.hub.reply")
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, "samp.webhub.ping")
server.register_function(self._unregister, "samp.webhub.unregister")
server.register_function(self._declare_metadata, "samp.webhub.declareMetadata")
server.register_function(self._get_metadata, "samp.webhub.getMetadata")
server.register_function(
self._declare_subscriptions, "samp.webhub.declareSubscriptions"
)
server.register_function(
self._get_subscriptions, "samp.webhub.getSubscriptions"
)
server.register_function(
self._get_registered_clients, "samp.webhub.getRegisteredClients"
)
server.register_function(
self._get_subscribed_clients, "samp.webhub.getSubscribedClients"
)
server.register_function(self._notify, "samp.webhub.notify")
server.register_function(self._notify_all, "samp.webhub.notifyAll")
server.register_function(self._call, "samp.webhub.call")
server.register_function(self._call_all, "samp.webhub.callAll")
server.register_function(self._call_and_wait, "samp.webhub.callAndWait")
server.register_function(self._reply, "samp.webhub.reply")
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, "samp.webhub.register")
server.register_function(
self._web_profile_allowReverseCallbacks, "samp.webhub.allowReverseCallbacks"
)
server.register_function(
self._web_profile_pullCallbacks, "samp.webhub.pullCallbacks"
)
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log,
logRequests=False,
allow_none=True,
)
prot = "http"
self._port = self._server.socket.getsockname()[1]
addr = f"{self._addr or self._host_name}:{self._port}"
self._url = urlunparse((prot, addr, "", "", "", ""))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = self._web_profile_requests_queue
self._web_profile_dialog.queue_result = self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
("localhost", self._web_port), log, logRequests=False, allow_none=True
)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except OSError:
log.warning(
"Port {} already in use. Impossible to run the "
"Hub with Web Profile support.".format(self._web_port),
SAMPWarning,
)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.0:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn(
"Timeout expired, Hub is shutting down!", SAMPWarning
)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.0:
                # Iterate over a copy of the keys: _unregister() below removes
                # entries from _client_activity_time.
                for private_key in list(self._client_activity_time.keys()):
if (
now - self._client_activity_time[private_key]
> self._client_timeout
and private_key != self._hub_private_key
):
warnings.warn(
f"Client {private_key} timeout expired!", SAMPWarning
)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == "samp.client.receiveCall":
return self._receive_call(*args)
elif method == "samp.client.receiveNotification":
return self._receive_notification(*args)
elif method == "samp.client.receiveResponse":
return self._receive_response(*args)
elif method == "samp.app.ping":
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {
"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "https://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon",
}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(
self._hub_private_key, {"samp.app.ping": {}, "x-samp.query.by-meta": {}}
)
def start(self, wait=False):
"""
Start the current SAMP Hub instance and create the lock file. Hub
        start-up can be blocking or non-blocking depending on the ``wait``
parameter.
Parameters
----------
wait : bool
If `True` then the Hub process is joined with the caller, blocking
            the code flow. The `True` option is usually used to run a
            stand-alone Hub in an executable script. If `False` (default), then
            the Hub process runs in a separate thread. `False` is usually used
            in a Python shell.
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(
lockfilename=self._customlockfilename,
mode=self._mode,
hub_id=self.id,
hub_params=self.params,
)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
        The hub parameters (which are written to the lockfile).
"""
params = {}
# Keys required by standard profile
params["samp.secret"] = self._hub_secret
params["samp.hub.xmlrpc.url"] = self._url
params["samp.profile.version"] = __profile_version__
# Custom keys
params["hub.id"] = self.id
params["hub.label"] = self._label or f"Hub {self.id}"
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub, name="Hub timeout test"
)
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client, name="Client timeout test"
)
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict["samp.secret"] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.0)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if (
self._thread_hub_timeout is not None
and self._thread_hub_timeout is not current_thread
):
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if (
self._thread_client_timeout is not None
and self._thread_client_timeout is not current_thread
):
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn(
f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning
)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(
request, self._web_profile_requests_result
)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select(
[self._web_profile_server.socket], [], [], 0.01
)[0]
except OSError as exc:
warnings.warn(
f"Call to select() in SAMPHubServer failed: {exc}", SAMPWarning
)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(
self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown", "samp.params": {}},
)
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id},
},
)
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id},
},
)
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.metadata",
"samp.params": {
"id": public_id,
"metadata": self._metadata[private_key],
},
},
)
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(
self._hub_private_key,
self._private_keys[key][0],
{
"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {
"id": public_id,
"subscriptions": self._id2mtypes[private_key],
},
},
)
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(
private_key, hub_public_id, message
)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug(f"notify disconnection to {public_id}")
self._launch_thread(
target=_xmlrpc_call_disconnect,
args=(
endpoint,
private_key,
self._hub_public_id,
{
"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"},
},
),
)
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (
xmlrpc_addr,
_HubAsClient(self._hub_as_client_request_handler),
)
return ""
# Dictionary stored with the public id
log.debug(f"set_xmlrpc_callback: {private_key} {xmlrpc_addr}")
            server_proxy_pool = ServerProxyPool(
                self._pool_size, xmlrpc.ServerProxy, xmlrpc_addr, allow_none=1
            )
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr, server_proxy_pool)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug(f"register: private-key = {private_key} and self-id = {public_id}")
return {
"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id,
}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = "cli#hub"
if self._client_id_counter > 0:
public_id = f"cli#{self._client_id_counter}"
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug(f"unregister {public_key} ({private_key})")
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug(
"declare_metadata: private-key = {} metadata = {}".format(
private_key, str(metadata)
)
)
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug(
f"get_metadata: private-key = {private_key} client-id = {client_id}"
)
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug(f"--> metadata = {self._metadata[client_private_key]}")
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug(
"declare_subscriptions: private-key = {} mtypes = {}".format(
private_key, str(mtypes)
)
)
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and mtype2 != mtype:
if mtype2 in mtypes:
del mtypes[mtype2]
log.debug(
"declare_subscriptions: subscriptions accepted from {} => {}".format(
private_key, str(mtypes)
)
)
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug(
"get_subscriptions: client-id = {} mtypes = {}".format(
client_id, str(self._id2mtypes[client_private_key])
)
)
return self._id2mtypes[client_private_key]
else:
log.debug(
f"get_subscriptions: client-id = {client_id} mtypes = missing"
)
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug(
"get_registered_clients: private_key = {} clients = {}".format(
private_key, reg_clients
)
)
return reg_clients
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug(
"get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients)
)
return sub_clients
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[: i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if not (
self._is_subscribed(
self._public_id_to_private_key(recipient_id), message["samp.mtype"]
)
):
raise SAMPProxyError(
2,
"Client {} not subscribed to MType {}".format(
recipient_id, message["samp.mtype"]
),
)
self._launch_thread(
target=self._notify_, args=(private_key, recipient_id, message)
)
return {}
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug(
"notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id, recipient_public_id
)
)
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(
recipient_private_key, recipient_public_id, samp_method_name, arg_params
)
except Exception as exc:
warnings.warn(
"{} notification from client {} to client {} failed [{}]".format(
message["samp.mtype"], sender_public_id, recipient_public_id, exc
),
SAMPWarning,
)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_all_(self, sender_private_key, message):
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(
target=self._notify,
args=(sender_private_key, _recipient_id, message),
)
if not recipient_ids:
warnings.warn(
"No client was able to receive this message",
SAMPWarning,
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if not (
self._is_subscribed(
self._public_id_to_private_key(recipient_id), message["samp.mtype"]
)
):
raise SAMPProxyError(
2,
"Client {} not subscribed to MType {}".format(
recipient_id, message["samp.mtype"]
),
)
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(
target=self._call_,
args=(private_key, public_id, recipient_id, msg_id, message),
)
return msg_id
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_(
self, sender_private_key, sender_public_id, recipient_public_id, msg_id, message
):
if sender_private_key not in self._private_keys:
return
try:
log.debug(
"call {} from {} to {} ({})".format(
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id,
message["samp.mtype"],
)
)
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
            samp_method_name = "receiveCall"
            self._retry_method(
                recipient_private_key, recipient_public_id, samp_method_name, arg_params
            )
except Exception as exc:
warnings.warn(
"{} call {} from client {} to client {} failed [{},{}]".format(
message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id,
type(exc),
exc,
),
SAMPWarning,
)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(
3,
f"samp.mtype keyword is missing in message tagged as {msg_tag}",
)
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_all_(self, sender_private_key, sender_public_id, msg_tag, message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id, msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(
target=self._call_,
args=(
sender_private_key,
sender_public_id,
receiver_public_id,
_msg_id,
message,
),
)
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call", message)
self._sync_msg_ids_heap[msg_id] = None
while self._is_running:
if 0 < timeout <= time.time() - now:
del self._sync_msg_ids_heap[msg_id]
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
del self._sync_msg_ids_heap[msg_id]
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(
target=self._reply_, args=(private_key, msg_id, response)
)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(
";;", 3
)
try:
log.debug(
f"reply {counter} from {responder_public_id} to {recipient_public_id}"
)
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(
recipient_public_id
)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(
recipient_private_key,
recipient_public_id,
samp_method_name,
arg_params,
)
except Exception as exc:
warnings.warn(
"{} reply from client {} to client {} failed [{}]".format(
recipient_msg_tag, responder_public_id, recipient_public_id, exc
),
SAMPWarning,
)
def _retry_method(
self, recipient_private_key, recipient_public_id, samp_method_name, arg_params
):
"""
        This method is used to retry a SAMP call, up to ``conf.n_retries`` times.
Parameters
----------
recipient_private_key
The private key of the receiver of the call
        recipient_public_id
            The public ID of the receiver of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (
self._web_profile
and recipient_private_key in self._web_profile_callbacks
):
# Web Profile
callback = {
"samp.methodName": samp_method_name,
"samp.params": arg_params,
}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(
recipient_private_key, *arg_params
)
except xmlrpc.Fault as exc:
log.debug(
"{} XML-RPC endpoint error (attempt {}): {}".format(
recipient_public_id, attempt + 1, exc.faultString
)
)
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
error_message = (
samp_method_name + " failed after " + str(conf.n_retries) + " attempts"
)
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
return "msg#{};;{};;{};;{}".format(
self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id,
sender_msg_id,
)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(
self._hub_private_key,
msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}},
)
elif "samp.mtype" in message and (
message["samp.mtype"] == "x-samp.query.by-meta"
or message["samp.mtype"] == "samp.query.by-meta"
):
ids_list = self._query_by_metadata(
message["samp.params"]["key"], message["samp.params"]["value"]
)
self._reply(
self._hub_private_key,
msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {"ids": ids_list}},
)
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(
self, identity_info, client_address=("unknown", 0), origin="unknown"
):
self._update_last_activity_time()
if client_address[0] not in ["localhost", "127.0.0.1"]:
raise SAMPProxyError(403, "Request of registration rejected by the Hub.")
if not origin:
origin = "unknown"
if isinstance(identity_info, dict):
# an old version of the protocol provided just a string with the app name
if "samp.name" not in identity_info:
raise SAMPProxyError(
403,
"Request of registration rejected "
"by the Hub (application name not "
"provided).",
)
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address, origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = "http://localhost:{}/translator/{}?ref=".format(
self._web_port, register_map["samp.private-key"]
)
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
class WebProfileDialog:
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
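    Examples
    --------
    A minimal, console-based sketch (illustrative only; a real GUI dialog
    would instead call ``handle_queue`` from a timer in its event loop, and
    the class name used here is hypothetical)::
        class ConsoleProfileDialog(WebProfileDialog):
            def show_dialog(self, samp_name, details, client, origin):
                # details, client and origin are ignored in this sketch
                answer = input(f"Allow web client {samp_name!r}? [y/N] ")
                if answer.strip().lower().startswith("y"):
                    self.consent()
                else:
                    self.reject()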
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
|
86f5fb17da166062176d333e260fb1cfeebefd083ea8b70f024b20cffa863494 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["quantity_input"]
import inspect
import typing as T
from collections.abc import Sequence
from functools import wraps
from numbers import Number
import numpy as np
from .core import (
Unit,
UnitBase,
UnitsError,
add_enabled_equivalencies,
dimensionless_unscaled,
)
from .physical import PhysicalType, get_physical_type
from .quantity import Quantity
NoneType = type(None)
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
unit = get_physical_type(target)._unit
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise ValueError(f"Invalid unit or physical type {target!r}.") from None
allowed_units.append(unit)
return allowed_units
def _validate_arg_value(
param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False
):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
# If dimensionless is an allowed unit and the argument is unit-less,
# allow numbers or numpy arrays with numeric dtypes
if (
dimensionless_unscaled in allowed_units
and not strict_dimensionless
and not hasattr(arg, "unit")
):
if isinstance(arg, Number):
return
elif isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number):
return
for allowed_unit in allowed_units:
try:
if arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies):
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError(
f"Argument '{param_name}' to function '{func_name}'"
f" has {error_msg}. You should pass in an astropy "
"Quantity instead."
)
else:
error_msg = (
f"Argument '{param_name}' to function '{func_name}' must "
"be in units convertible to"
)
if len(targets) > 1:
targ_names = ", ".join([f"'{str(targ)}'" for targ in targets])
raise UnitsError(f"{error_msg} one of: {targ_names}.")
else:
raise UnitsError(f"{error_msg} '{str(targets[0])}'.")
def _parse_annotation(target):
if target in (None, NoneType, inspect._empty):
return target
# check if unit-like
try:
unit = Unit(target)
except (TypeError, ValueError):
try:
ptype = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
if isinstance(target, str):
raise ValueError(f"invalid unit or physical type {target!r}.") from None
else:
return ptype
else:
return unit
# could be a type hint
origin = T.get_origin(target)
if origin is T.Union:
return [_parse_annotation(t) for t in T.get_args(target)]
elif origin is not T.Annotated: # can't be Quantity[]
return False
# parse type hint
cls, *annotations = T.get_args(target)
if not issubclass(cls, Quantity) or not annotations:
return False
# get unit from type hint
unit, *rest = annotations
if not isinstance(unit, (UnitBase, PhysicalType)):
return False
return unit
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
        in the annotation. If the argument has no unit attribute, i.e. it is not
        a Quantity object, a `TypeError` will be raised, unless the unit
        specification came only from a non unit-related annotation. This is to
        allow non-Quantity annotations to pass through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
        The original function is accessible by the attribute ``__wrapped__``.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
        You can also specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop("equivalencies", [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (
param.name not in bound_args.arguments
and param.default is not param.empty
):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isinstance(targets, Sequence):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [
t
for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))
]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(
param.name,
wrapped_function.__name__,
arg,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (
ra
if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra)
)
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [
t for t in target if isinstance(t, (str, UnitBase, PhysicalType))
]
_validate_arg_value(
"return",
wrapped_function.__name__,
return_,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
|
3a5b1737aa933864f2d6c005d39de0b7e19907b715ae3546c981837198033eba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# STDLIB
import numbers
import operator
import re
import warnings
from fractions import Fraction
# THIRD PARTY
import numpy as np
# LOCAL
from astropy import config as _config
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from .core import (
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
get_current_unit_registry,
)
from .format import Base, Latex
from .quantity_helper import can_have_arbitrary_unit, check_output, converters_and_unit
from .quantity_helper.function_helpers import (
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
SUBCLASS_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .structured import StructuredUnit, _structured_unit_like_dtype
from .utils import is_effectively_unity
__all__ = [
"Quantity",
"SpecificTypeQuantity",
"QuantityInfoBase",
"QuantityInfo",
"allclose",
"isclose",
]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ["Quantity.*"]
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity.
"""
latex_array_threshold = _config.ConfigItem(
100,
"The maximum size an array Quantity can be before its LaTeX "
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
"negative number means that the value will instead be whatever numpy "
"gets from get_printoptions.",
)
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities.
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
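    Examples
    --------
    Typical use goes through ``Quantity.flat`` (illustrative)::
        import astropy.units as u
        q = [[1.0, 2.0], [3.0, 4.0]] * u.m
        q.flat[2]              # third element as a scalar Quantity (3.0 m)
        for item in q.flat:    # iterates over scalar Quantities
            ...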
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
def __len__(self):
return len(self._dataiter)
#### properties and methods to match `numpy.ndarray.flatiter` ####
@property
def base(self):
"""A reference to the array that is iterated over."""
return self._quantity
@property
def coords(self):
"""An N-dimensional tuple of current coordinates."""
return self._dataiter.coords
@property
def index(self):
"""Current flat index into the array."""
return self._dataiter.index
def copy(self):
"""Get a copy of the iterator as a 1-D array."""
return self._quantity.flatten()
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
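    Examples
    --------
    Illustrative use with a |Quantity| as a table column (the ``info``
    attribute is attached to every |Quantity| automatically)::
        from astropy.table import QTable
        import astropy.units as u
        t = QTable()
        t["speed"] = [1.0, 2.0] * u.km / u.s
        t["speed"].info.description = "Line-of-sight velocity"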
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
If `None`, the normal `numpy.dtype` introspection is used, e.g.
preventing upcasting of integers.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be prepended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
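    Examples
    --------
    Illustrative ways of constructing a |Quantity| (many more are shown in
    the documentation linked above)::
        import numpy as np
        import astropy.units as u
        q1 = 15.1 * u.m / u.s              # number times unit
        q2 = u.Quantity([1, 2, 3], u.km)   # explicit constructor
        q3 = np.arange(3.0) * u.Jy         # array times unit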
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __class_getitem__(cls, unit_shape_dtype):
"""Quantity Type Hints.
Unit-aware type hints are ``Annotated`` objects that encode the class,
the unit, and possibly shape and dtype information, depending on the
python and :mod:`numpy` versions.
Schematically, ``Annotated[cls[shape, dtype], unit]``
As a classmethod, the type is the class, ie ``Quantity``
produces an ``Annotated[Quantity, ...]`` while a subclass
like :class:`~astropy.coordinates.Angle` returns
``Annotated[Angle, ...]``.
Parameters
----------
unit_shape_dtype : :class:`~astropy.units.UnitBase`, str, `~astropy.units.PhysicalType`, or tuple
Unit specification, can be the physical type (ie str or class).
If tuple, then the first element is the unit specification
and all other elements are for `numpy.ndarray` type annotations.
Whether they are included depends on the python and :mod:`numpy`
versions.
Returns
-------
`typing.Annotated`, `astropy.units.Unit`, or `astropy.units.PhysicalType`
Return type in this preference order:
* `typing.Annotated`
* `astropy.units.Unit` or `astropy.units.PhysicalType`
Raises
------
TypeError
If the unit/physical_type annotation is not Unit-like or
PhysicalType-like.
Examples
--------
Create a unit-aware Quantity type annotation
>>> Quantity[Unit("s")]
Annotated[Quantity, Unit("s")]
See Also
--------
`~astropy.units.quantity_input`
Use annotations for unit checks on function arguments and results.
Notes
-----
With Python 3.9+ or :mod:`typing_extensions`, |Quantity| types are also
static-type compatible.
"""
from typing import Annotated
# process whether [unit] or [unit, shape, dtype]
if isinstance(unit_shape_dtype, tuple): # unit, shape, dtype
target = unit_shape_dtype[0]
shape_dtype = unit_shape_dtype[1:]
else: # just unit
target = unit_shape_dtype
shape_dtype = ()
# Allowed unit/physical types. Errors if neither.
try:
unit = Unit(target)
except (TypeError, ValueError):
from astropy.units.physical import get_physical_type
try:
unit = get_physical_type(target)
except (TypeError, ValueError, KeyError): # KeyError for Enum
raise TypeError(
"unit annotation is not a Unit or PhysicalType"
) from None
# Quantity does not (yet) properly extend the NumPy generics types,
# introduced in numpy v1.22+, instead just including the unit info as
# metadata using Annotated.
# TODO: ensure we do interact with NDArray.__class_getitem__.
return Annotated[cls, unit]
def __new__(
cls,
value,
unit=None,
dtype=np.inexact,
copy=True,
order=None,
subok=False,
ndmin=0,
):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# inexact -> upcast to float dtype
float_default = dtype is np.inexact
if float_default:
dtype = None
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and isinstance(value, cls)):
value = value.view(cls)
if float_default and value.dtype.kind in "iu":
dtype = float
return np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (
r"\s*[+-]?"
r"((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|"
r"([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))"
r"([eE][+-]?\d+)?"
r"[.+-]?"
)
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError(
f'Cannot parse "{value}" as a {cls.__name__}. It does not '
"start with a number."
)
unit_string = v.string[v.end() :].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif isiterable(value) and len(value) > 0:
# Iterables like lists and tuples.
if all(isinstance(v, Quantity) for v in value):
# If a list/tuple containing only quantities, convert all
# to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
elif (
dtype is None
and not hasattr(value, "dtype")
and isinstance(unit, StructuredUnit)
):
# Special case for list/tuple of values and a structured unit:
# ``np.array(value, dtype=None)`` would treat tuples as lower
# levels of the array, rather than as elements of a structured
# array, so we use the structure of the unit to help infer the
# structured dtype of the value.
dtype = unit._recursively_get_dtype(value)
using_default_unit = False
if value_unit is None:
# If the value has a `unit` attribute that is not None (it can be
# None for Columns with an uninitialized unit), treat it like a quantity.
value_unit = getattr(value, "unit", None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
using_default_unit = True
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError(
f"The unit attribute {value.unit!r} of the input could "
"not be parsed as an astropy Unit."
) from exc
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(
value, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin
)
# For no-user-input unit, make sure the constructed unit matches the
# structure of the data.
if using_default_unit and value.dtype.names is not None:
unit = value_unit = _structured_unit_like_dtype(value_unit, value.dtype)
# check that array contains numbers or long int objects
if value.dtype.kind in "OSU" and not (
value.dtype.kind == "O" and isinstance(value.item(0), numbers.Number)
):
raise TypeError("The value must be a valid Python or Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if float_default and value.dtype.kind in "iuO":
value = value.astype(float)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, "_quantity_class", cls)
if issubclass(qcls, cls):
cls = qcls
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
def __array_finalize__(self, obj):
# Check whether super().__array_finalize should be called
# (sadly, ndarray.__array_finalize__ is None; we cannot be sure
# what is above us).
super_array_finalize = super().__array_finalize__
if super_array_finalize is not None:
super_array_finalize(obj)
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Copy over the unit and possibly info. Note that the only way the
# unit can already be set is if one enters via _new_view(), where the
# unit is often different from that of self, and where propagation of
# info is not always desirable.
if self._unit is None:
unit = getattr(obj, "_unit", None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if "info" in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError(
"__array_wrap__ should not be used with a context any more since all "
"use should go through array_function. Please raise an issue on "
"https://github.com/astropy/astropy"
)
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity` or `NotImplemented`
Results of the ufunc, with the unit set properly.
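Examples
--------
Illustrative only; any ufunc applied to quantities is routed through
this method (reprs indicative)::

    >>> import numpy as np
    >>> from astropy import units as u
    >>> np.add(1.0 * u.km, 500.0 * u.m)     # doctest: +SKIP
    <Quantity 1.5 km>
    >>> np.sqrt(4.0 * u.m ** 2)             # doctest: +SKIP
    <Quantity 2. m>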
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
try:
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get("out", None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs["out"] = (out_array,) if function.nout == 1 else out_array
if method == "reduce" and "initial" in kwargs and unit is not None:
# Special-case for initial argument for reductions like
# np.add.reduce. This should be converted to the output unit as
# well, which is typically the same as the input unit (but can
# in principle be different: unitless for np.equal, radian
# for np.arctan2, though those are not necessarily useful!)
kwargs["initial"] = self._to_own_unit(
kwargs["initial"], check_precision=False, unit=unit
)
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, "value", input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
except (TypeError, ValueError, AttributeError) as e:
out_normalized = kwargs.get("out", tuple())
inputs_and_outputs = inputs + out_normalized
ignored_ufunc = (
None,
np.ndarray.__array_ufunc__,
type(self).__array_ufunc__,
)
if not all(
getattr(type(io), "__array_ufunc__", None) in ignored_ufunc
for io in inputs_and_outputs
):
return NotImplemented
else:
raise e
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : ndarray or tuple thereof
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, (tuple, list)):
if out is None:
out = (None,) * len(result)
return result.__class__(
self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in zip(result, unit, out)
)
if out is None:
# View the result array as a Quantity with the proper unit.
return (
result
if unit is None
else self._new_view(result, unit, propagate_info=False)
)
elif isinstance(out, Quantity):
# For given Quantity output, just set the unit. We know the unit
# is not None and the output is of the correct Quantity subclass,
# as it was passed through check_output.
# (We cannot do this unconditionally, though, since it is possible
# for out to be ndarray and the unit to be dimensionless.)
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None, propagate_info=True):
"""Create a Quantity view of some array-like input, and set the unit.
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : unit-like, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
propagate_info : bool, optional
Whether to transfer ``info`` if present. Default: `True`, as
appropriate for, e.g., unit conversions or slicing, where the
nature of the object does not change.
Returns
-------
view : `~astropy.units.Quantity` subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, "_quantity_class", Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
# Note that for an ndarray input, the np.array call takes only about
# double the time of the check ``obj.__class__ is np.ndarray``, so it is
# not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False, subok=True)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
if propagate_info and "info" in self.__dict__:
view.info = self.info
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
if isinstance(self._unit, StructuredUnit) or isinstance(
unit, StructuredUnit
):
unit = StructuredUnit(unit, self.dtype)
else:
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict="silent")
if not isinstance(unit, (UnitBase, StructuredUnit)):
raise UnitTypeError(
f"{self.__class__.__name__} instances require normal units, "
f"not {unit.__class__} instances."
)
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
if not self.dtype.names or isinstance(self.unit, StructuredUnit):
# Standard path; let the unit do the work.
return self.unit.to(
unit, self.view(np.ndarray), equivalencies=equivalencies
)
else:
# The .to() method of a simple unit cannot convert a structured
# dtype, so we work around it, by recursing.
# TODO: deprecate this?
# Convert simple to Structured on initialization?
result = np.empty_like(self.view(np.ndarray))
for name in self.dtype.names:
result[name] = self[name]._to_value(unit, equivalencies)
return result
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See Also
--------
to_value : get the numerical value in a given unit.
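Examples
--------
A minimal, illustrative sketch (reprs indicative only)::

    >>> from astropy import units as u
    >>> (1.0 * u.km).to(u.m)                                    # doctest: +SKIP
    <Quantity 1000. m>
    >>> (500.0 * u.nm).to(u.Hz, equivalencies=u.spectral())     # doctest: +SKIP
    <Quantity 5.99584916e+14 Hz>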
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : unit-like, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If not provided
or ``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : ndarray or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See Also
--------
to : Get a new instance in a different unit.
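Examples
--------
Illustrative only; scalar reprs may differ between numpy versions::

    >>> from astropy import units as u
    >>> q = 1.0 * u.km
    >>> q.to_value(u.m)    # doctest: +SKIP
    1000.0
    >>> q.to_value()       # doctest: +SKIP
    1.0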
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
elif not self.dtype.names:
# For non-structured, we attempt a short-cut, where we just get
# the scale. If that is 1, we do not have to do anything.
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
else:
# For structured arrays, we go the default route.
value = self._to_value(unit, equivalencies)
# Index with empty tuple to decay array scalars into numpy scalars.
return value if value.shape else value[()]
value = property(
to_value,
doc="""The numerical value of this instance.
See Also
--------
to_value : Get the numerical value in a given unit.
""",
)
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
def _recursively_apply(self, func):
"""Apply function recursively to every field.
Returns a copy with the result.
"""
result = np.empty_like(self)
result_value = result.view(np.ndarray)
result_unit = ()
for name in self.dtype.names:
part = func(self[name])
result_value[name] = part.value
result_unit += (part.unit,)
result._set_unit(result_unit)
return result
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("si"))
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale, si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
if self.dtype.names:
return self._recursively_apply(operator.attrgetter("cgs"))
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale, cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return super().__dir__()
dir_values = set(super().__dir__())
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(equivalencies):
dir_values.update(equivalent.names)
return sorted(dir_values)
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
f"'{self.__class__.__name__}' object has no '{attr}' member"
)
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies
)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
f"{self.__class__.__name__} instance has no attribute '{attr}'"
)
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting, and does not
# deal well with structured arrays (nor does the ufunc).
def __eq__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return False
except Exception:
return NotImplemented
return self.value.__eq__(other_value)
def __ne__(self, other):
try:
other_value = self._to_own_unit(other)
except UnitsError:
return True
except Exception:
return NotImplemented
return self.value.__ne__(other_value)
# Unit conversion operator (<<).
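# A brief, illustrative sketch of how ``<<`` behaves (reprs indicative):
#   >>> import numpy as np
#   >>> from astropy import units as u
#   >>> np.arange(3.0) << u.m      # attach a unit, avoiding a copy
#   <Quantity [0., 1., 2.] m>
#   >>> (1.0 * u.km) << u.m        # convert to the requested unit
#   <Quantity 1000. m>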
def __lshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented # try other.__rlshift__(self)
try:
factor = self.unit._to(other)
except UnitConversionError: # incompatible, or requires an Equivalency
return NotImplemented
except AttributeError: # StructuredUnit does not have `_to`
# In principle, in-place might be possible.
return NotImplemented
view = self.view(np.ndarray)
try:
view *= factor # operates on view
except TypeError:
# The error is `numpy.core._exceptions._UFuncOutputCastingError`,
# which inherits from `TypeError`.
return NotImplemented
self._set_unit(other)
return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(
">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning,
)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
"""Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), other * self.unit, propagate_info=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
"""
Right Multiplication between `Quantity` objects and other objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
"""Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(
self.value.copy(), self.unit / other, propagate_info=False
)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
"""Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(
1.0 / self.value, other / self.unit, propagate_info=False
)
return super().__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(
self.value ** float(other), self.unit**other, propagate_info=False
)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value is not"
" iterable"
)
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
if isinstance(key, str) and isinstance(self.unit, StructuredUnit):
return self._new_view(
self.view(np.ndarray)[key], self.unit[key], propagate_info=False
)
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value "
"does not support indexing"
)
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if not isinstance(out, np.ndarray):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
if isinstance(i, str):
# Indexing will cause a different unit, so by doing this in
# two steps we effectively try with the right unit.
self[i][...] = value
return
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and "info" in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""This method raises ValueError, since truthiness of quantities is ambiguous,
especially for logarithmic units and temperatures. Use explicit comparisons.
"""
raise ValueError(
f"{type(self).__name__} truthiness is ambiguous, especially for logarithmic units"
" and temperatures. Use explicit comparisons."
)
def __len__(self):
if self.isscalar:
raise TypeError(
f"'{self.__class__.__name__}' object with a scalar value has no len()"
)
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError(
"only dimensionless scalar quantities can be "
"converted to Python scalars"
)
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError(
"only integer dimensionless scalar quantities "
"can be converted to a Python index"
)
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = " " + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : unit-like, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : number, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string that uses
negative exponents instead of fractions
subfmt : str, optional
Subformat of the result. For the moment, only used for
``format='latex'`` and ``format='latex_inline'``. Supported
values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
str
A string with the contents of this Quantity
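Examples
--------
A short, illustrative sketch (the exact formatting follows numpy print
options and may vary)::

    >>> from astropy import units as u
    >>> (1.25 * u.m).to_string()                  # doctest: +SKIP
    '1.25 m'
    >>> (1.25 * u.m).to_string(format='latex')    # doctest: +SKIP
    '$1.25 \\; \\mathrm{m}$'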
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt
)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
formats["latex_inline"] = formats["latex"]
if format not in formats:
raise ValueError(f"Unknown format '{format}'")
elif format is None:
if precision is None:
# Use default formatting settings
return f"{self.value}{self._unitstr:s}"
else:
# np.array2string properly formats arrays as well as scalars
return (
np.array2string(self.value, precision=precision, floatmode="fixed")
+ self._unitstr
)
# else, for the moment we assume format="latex" or "latex_inline".
# Set the precision if set, otherwise use numpy default
pops = np.get_printoptions()
format_spec = f".{precision if precision is not None else pops['precision']}g"
def float_formatter(value):
return Latex.format_exponential_notation(value, format_spec=format_spec)
def complex_formatter(value):
return "({}{}i)".format(
Latex.format_exponential_notation(value.real, format_spec=format_spec),
Latex.format_exponential_notation(
value.imag, format_spec="+" + format_spec
),
)
# The view is needed for the scalar case - self.value might be float.
latex_value = np.array2string(
self.view(np.ndarray),
threshold=(
conf.latex_array_threshold
if conf.latex_array_threshold > -1
else pops["threshold"]
),
formatter={
"float_kind": float_formatter,
"complex_kind": complex_formatter,
},
max_line_width=np.inf,
separator=",~",
)
latex_value = latex_value.replace("...", r"\dots")
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
if self.unit is None:
latex_unit = _UNIT_NOT_INITIALISED
elif format == "latex":
latex_unit = self.unit._repr_latex_()[1:-1] # note this is unicode
elif format == "latex_inline":
latex_unit = self.unit.to_string(format="latex_inline")[1:-1]
delimiter_left, delimiter_right = formats[format][subfmt]
return rf"{delimiter_left}{latex_value} \; {latex_unit}{delimiter_right}"
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = "<" + self.__class__.__name__ + " "
arrstr = np.array2string(
self.view(np.ndarray), separator=", ", prefix=prefixstr
)
return f"{prefixstr}{arrstr}{self._unitstr:s}>"
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format="latex", subfmt="inline")
def __format__(self, format_spec):
try:
return self.to_string(format=format_spec)
except ValueError:
# We might have a unit format not implemented in `to_string()`.
if format_spec in Base.registry:
if self.unit is dimensionless_unscaled:
return f"{self.value}"
else:
return f"{self.value} {format(self.unit, format_spec)}"
# Can the value be formatted on its own?
try:
return f"{format(self.value, format_spec)}{self._unitstr:s}"
except ValueError:
# Format the whole thing as a single string.
return format(f"{self.value}{self._unitstr:s}", format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
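Examples
--------
Illustrative only (reprs indicative)::

    >>> from astropy import units as u
    >>> (1.0 * u.N).decompose()     # doctest: +SKIP
    <Quantity 1. kg m / s2>
    >>> (1.0 * u.km).decompose()    # doctest: +SKIP
    <Quantity 1000. m>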
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, "scale"):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError(
"cannot make a list of Quantities. Get list of values with"
" q.value.tolist()."
)
def _to_own_unit(self, value, check_precision=True, *, unit=None):
"""Convert value to one's own unit (or that given).
Here, non-quantities are treated as dimensionless, and care is taken
for values of 0, infinity or nan, which are allowed to have any unit.
Parameters
----------
value : anything convertible to `~astropy.units.Quantity`
The value to be converted to the requested unit.
check_precision : bool
Whether to forbid conversion of float to integer if that changes
the input number. Default: `True`.
unit : `~astropy.units.Unit` or None
The unit to convert to. By default, the unit of ``self``.
Returns
-------
value : number or `~numpy.ndarray`
In the requested units.
"""
if unit is None:
unit = self.unit
try:
_value = value.to_value(unit)
except AttributeError:
# We're not a Quantity.
# First remove two special cases (with a fast test):
# 1) Maybe masked printing? MaskedArray with quantities does not
# work very well, but no reason to break even repr and str.
# 2) np.ma.masked? useful if we're a MaskedQuantity.
if value is np.ma.masked or (
value is np.ma.masked_print_option and self.dtype.kind == "O"
):
return value
# Now, let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
as_quantity = Quantity(value)
_value = as_quantity.to_value(unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if not hasattr(value, "unit") and can_have_arbitrary_unit(
as_quantity.value
):
_value = as_quantity.value
else:
raise
if self.dtype.kind == "i" and check_precision:
# If, e.g., we are casting float to int, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False, subok=True)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype, subok=True)
if not np.all((self_dtype_array == _value) | np.isnan(_value)):
raise TypeError(
"cannot convert value type to array type without precision loss"
)
# Setting names to ensure things like equality work (note that
# above will have failed already if units did not match).
if self.dtype.names:
_value.dtype.names = self.dtype.names
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] + (self._to_own_unit(args[-1]),)))
def tostring(self, order="C"):
"""Not implemented, use ``.value.tostring()`` instead."""
raise NotImplementedError(
"cannot write Quantities to string. Write array with"
" q.value.tostring(...)."
)
def tobytes(self, order="C"):
"""Not implemented, use ``.value.tobytes()`` instead."""
raise NotImplementedError(
"cannot write Quantities to bytes. Write array with q.value.tobytes(...)."
)
def tofile(self, fid, sep="", format="%s"):
"""Not implemented, use ``.value.tofile()`` instead."""
raise NotImplementedError(
"cannot write Quantities to file. Write array with q.value.tofile(...)"
)
def dump(self, file):
"""Not implemented, use ``.value.dump()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to file. Write array with q.value.dump()"
)
def dumps(self):
"""Not implemented, use ``.value.dumps()`` instead."""
raise NotImplementedError(
"cannot dump Quantities to string. Write array with q.value.dumps()"
)
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode="raise"):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode="raise"):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode="raise"):
raise NotImplementedError(
"cannot choose based on quantity. Choose using array with"
" q.value.choose(...)"
)
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind="quicksort", order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(
np.array(self), self._to_own_unit(v, check_precision=False), *args, **kwargs
) # avoid numpy 1.6 problem
if NUMPY_LT_1_22:
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
else:
def argmax(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmax(axis=axis, out=out, keepdims=keepdims)
def argmin(self, axis=None, out=None, *, keepdims=False):
return self.view(np.ndarray).argmin(axis=axis, out=out, keepdims=keepdims)
def __array_function__(self, function, types, args, kwargs):
"""Wrap numpy functions, taking care of units.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
Returns
-------
result : `~astropy.units.Quantity` or `~numpy.ndarray`
As appropriate for the function. If the function is not
supported, `NotImplemented` is returned, which will lead to
a `TypeError` unless another argument overrode the function.
Raises
------
~astropy.units.UnitsError
If operands have incompatible units.
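Examples
--------
Illustrative only; non-ufunc numpy functions applied to quantities are
routed through this method (repr indicative)::

    >>> import numpy as np
    >>> from astropy import units as u
    >>> np.concatenate([[1.0, 2.0] * u.m, [300.0] * u.cm])    # doctest: +SKIP
    <Quantity [1., 2., 3.] m>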
"""
# A function should be in one of the following sets or dicts:
# 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
# supports Quantity; we pass on to ndarray.__array_function__.
# 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
# after converting quantities to arrays with suitable units,
# and possibly setting units on the result.
# 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
# requires a Quantity-specific implementation.
# 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
# For now, since we may not yet have complete coverage, if a
# function is in none of the above, we simply call the numpy
# implementation.
if function in SUBCLASS_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in FUNCTION_HELPERS:
function_helper = FUNCTION_HELPERS[function]
try:
args, kwargs, unit, out = function_helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
result = super().__array_function__(function, types, args, kwargs)
# Fall through to return section
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
result, unit, out = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
# Fall through to return section
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
warnings.warn(
f"function '{function.__name__}' is not known to astropy's Quantity."
" Will run it anyway, hoping it will treat ndarray subclasses"
" correctly. Please raise an issue at"
" https://github.com/astropy/astropy/issues.",
AstropyWarning,
)
return super().__array_function__(function, types, args, kwargs)
# If unit is None, a plain array is expected (e.g., boolean), which
# means we're done.
# We're also done if the result was NotImplemented, which can happen
# if other inputs/outputs override __array_function__;
# hopefully, they can then deal with us.
if unit is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(
issubclass(t, np.ndarray) and not issubclass(t, Quantity) for t in types
):
raise TypeError(
f"the Quantity implementation cannot handle {function} "
"with the given arguments."
) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple(
(arg.value if isinstance(arg, Quantity) else arg) for arg in args
)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs["out"] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype, out=out)
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.var,
axis,
dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
where=where,
unit=self.unit**2,
)
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
return self._wrap_function(
np.std, axis, dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
return self._wrap_function(
np.mean, axis, dtype, out=out, keepdims=keepdims, where=where
)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, "unit", dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)"
)
def any(self, axis=None, out=None):
raise TypeError(
"cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)"
)
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
if NUMPY_LT_1_22:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis, out=out, keepdims=keepdims)
else:
@deprecated("5.3", alternative="np.nansum", obj_type="method")
def nansum(
self, axis=None, out=None, keepdims=False, *, initial=None, where=True
):
if initial is not None:
initial = self._to_own_unit(initial)
return self._wrap_function(
np.nansum,
axis,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
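Examples
--------
A minimal sketch of a hypothetical subclass (``Wavelength`` is not part
of astropy; it is shown purely for illustration)::

    >>> from astropy import units as u
    >>> class Wavelength(u.SpecificTypeQuantity):
    ...     _equivalent_unit = u.m
    >>> Wavelength(500.0, u.nm)     # doctest: +SKIP
    <Wavelength 500. nm>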
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
# without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{} instances require units equivalent to '{}'".format(
type(self).__name__, self._equivalent_unit
)
+ (
", but no unit was given."
if unit is None
else f", so cannot set it to '{unit}'."
)
)
super()._set_unit(unit)
def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs):
"""
Return a boolean array where two arrays are element-wise equal
within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
allclose
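Examples
--------
A minimal, illustrative sketch (outputs indicative)::

    >>> from astropy import units as u
    >>> from astropy.units import isclose
    >>> isclose(100.0 * u.cm, 1.0 * u.m)                     # doctest: +SKIP
    True
    >>> isclose([1.0, 2.0] * u.m, [100.0, 201.0] * u.cm)     # doctest: +SKIP
    array([ True, False])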
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.isclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False, **kwargs) -> bool:
"""
Whether two arrays are element-wise equal within a tolerance.
Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
The relative tolerance for the comparison, which defaults to
``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
The absolute tolerance for the comparison. The units (or lack
thereof) of ``a``, ``b``, and ``atol`` must be consistent with
each other. If `None`, ``atol`` defaults to zero in the
appropriate units.
equal_nan : `bool`
Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
be considered equal to NaNs in ``b``.
Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`. However, this differs from the `numpy` function in
that the default for the absolute tolerance here is zero instead of
``atol=1e-8`` in `numpy`, as there is no natural way to set a default
*absolute* tolerance given two inputs that may have differently scaled
units.
Raises
------
`~astropy.units.UnitsError`
If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
or if ``rtol`` is not dimensionless.
See Also
--------
isclose
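Examples
--------
A minimal, illustrative sketch (output indicative)::

    >>> from astropy import units as u
    >>> from astropy.units import allclose
    >>> allclose([1.0, 2.0] * u.m, [100.0, 200.0] * u.cm)    # doctest: +SKIP
    True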
"""
unquantified_args = _unquantify_allclose_arguments(a, b, rtol, atol)
return np.allclose(*unquantified_args, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'desired' ({desired.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
if atol is None:
# By default, we assume an absolute tolerance of zero in the
# appropriate units. The default value of None for atol is
# needed because the units of atol must be consistent with the
# units for a and b.
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError(
f"Units for 'atol' ({atol.unit}) and 'actual' "
f"({actual.unit}) are not convertible"
)
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("'rtol' should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
c408a42866c27f74eb2ac3581385404b232d541f3083477a4cc59ff72b0482ab | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
import functools
from collections import namedtuple
import numpy as np
from astropy import units as u
from astropy.utils import isiterable
from . import angle_formats as form
__all__ = ["Angle", "Latitude", "Longitude"]
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple("hms_tuple", ("h", "m", "s"))
dms_tuple = namedtuple("dms_tuple", ("d", "m", "s"))
signed_dms_tuple = namedtuple("signed_dms_tuple", ("sign", "d", "m", "s"))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats.
The examples below illustrate common ways of initializing an
`~astropy.coordinates.Angle` object. First some imports::
>>> from astropy.coordinates import Angle
>>> from astropy import units as u
The angle values can now be provided::
>>> Angle('10.2345d')
<Angle 10.2345 deg>
>>> Angle(['10.2345d', '-20d'])
<Angle [ 10.2345, -20. ] deg>
>>> Angle('1:2:30.43 degrees')
<Angle 1.04178611 deg>
>>> Angle('1 2 0 hours')
<Angle 1.03333333 hourangle>
>>> Angle(np.arange(1, 8), unit=u.deg)
<Angle [1., 2., 3., 4., 5., 6., 7.] deg>
>>> Angle('1°2′3″')
<Angle 1.03416667 deg>
>>> Angle('1°2′3″N')
<Angle 1.03416667 deg>
>>> Angle('1d2m3.4s')
<Angle 1.03427778 deg>
>>> Angle('1d2m3.4sS')
<Angle -1.03427778 deg>
>>> Angle('-1h2m3s')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2m3sE')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2.5m')
<Angle -1.04166667 hourangle>
>>> Angle('-1h2.5mW')
<Angle 1.04166667 hourangle>
>>> Angle('-1:2.5', unit=u.deg)
<Angle -1.04166667 deg>
>>> Angle(10.2345 * u.deg)
<Angle 10.2345 deg>
>>> Angle(Angle(10.2345 * u.deg))
<Angle 10.2345 deg>
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=np.inexact, copy=True, **kwargs):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = form.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
if angle_unit == u.hourangle:
form._check_hour_range(angle[0])
form._check_minute_range(angle[1])
a = np.abs(angle[0]) + angle[1] / 60.0
if len(angle) == 3:
form._check_second_range(angle[2])
a += angle[2] / 3600.0
angle = np.copysign(a, angle[0])
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif isiterable(angle) and not (
isinstance(angle, np.ndarray) and angle.dtype.kind not in "SUVO"
):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy, **kwargs)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
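        For example, ``(1, 30, 0)`` with ``unit=u.deg`` corresponds to ``1.5``
        (one degree plus thirty arcminutes).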
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return form.hms_to_hours(*angle)
elif unit == u.degree:
return form.dms_to_degrees(*angle)
else:
raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'")
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit == u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""The angle's value in hours, as a named tuple with ``(h, m, s)`` members."""
return hms_tuple(*form.hours_to_hms(self.hourangle))
@property
def dms(self):
"""The angle's value in degrees, as a ``(d, m, s)`` named tuple."""
return dms_tuple(*form.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""The angle's value in degrees, as a ``(sign, d, m, s)`` named tuple.
The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``.
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
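        Examples
        --------
        A short sketch; the sign is carried separately, so ``d``, ``m`` and
        ``s`` stay positive::
            >>> from astropy.coordinates import Angle
            >>> import astropy.units as u
            >>> sign, d, m, s = Angle(-1.5 * u.deg).signed_dms
            >>> int(sign), int(d), int(m), int(s)
            (-1, 1, 30, 0)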
"""
return signed_dms_tuple(
np.sign(self.degree), *form.degrees_to_dms(np.abs(self.degree))
)
def to_string(
self,
unit=None,
decimal=False,
sep="fromunit",
precision=None,
alwayssign=False,
pad=False,
fields=3,
format=None,
):
"""A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `False`, the returned string will be in sexagesimal form
if possible (for units of degrees or hourangle). If `True`,
a decimal representation will be used. In that case, no unit
will be appended if ``format`` is not explicitly given.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
            ``sep='-:'`` would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string which is the
same as with ``format='latex'`` for |Angle| instances
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
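        Examples
        --------
        A brief sketch of typical usage (sexagesimal and decimal output for a
        degree-valued angle)::
            >>> from astropy.coordinates import Angle
            >>> import astropy.units as u
            >>> Angle(10.2345 * u.deg).to_string(sep=':', precision=2)
            '10:14:04.20'
            >>> Angle(10.2345 * u.deg).to_string(decimal=True, precision=4)
            '10.2345'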
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
"generic": {u.degree: "dms", u.hourangle: "hms"},
"latex": {
u.degree: [r"^\circ", r"{}^\prime", r"{}^{\prime\prime}"],
u.hourangle: [r"^{\mathrm{h}}", r"^{\mathrm{m}}", r"^{\mathrm{s}}"],
},
"unicode": {u.degree: "°′″", u.hourangle: "ʰᵐˢ"},
}
# 'latex_inline' provides no functionality beyond what 'latex' offers,
# but it should be implemented to avoid ValueErrors in user code.
separators["latex_inline"] = separators["latex"]
# Default separators are as for generic.
separators[None] = separators["generic"]
# Create an iterator so we can format each element of what
# might be an array.
        if not decimal and ((unit_is_deg := unit == u.degree) or unit == u.hourangle):
# Sexagesimal.
if sep == "fromunit":
if format not in separators:
raise ValueError(f"Unknown format '{format}'")
sep = separators[format][unit]
func = functools.partial(
form.degrees_to_string if unit_is_deg else form.hours_to_string,
precision=precision,
sep=sep,
pad=pad,
fields=fields,
)
else:
if sep != "fromunit":
raise ValueError(
f"'{unit}' can not be represented in sexagesimal notation"
)
func = ("{:g}" if precision is None else f"{{0:0.{precision}f}}").format
# Don't add unit by default for decimal.
# TODO: could we use Quantity.to_string() here?
if not (decimal and format is None):
unit_string = unit.to_string(format=format)
if format == "latex" or format == "latex_inline":
# Remove $ and add space in front if unit is not a superscript.
if "^" in unit_string:
unit_string = unit_string[1:-1]
else:
unit_string = r"\;" + unit_string[1:-1]
elif len(unit_string) > 1:
# Length one for angular units can only happen for
# superscript degree, arcmin, arcsec, hour, minute, second,
# and those should not get an extra space.
unit_string = " " + unit_string
format_func = func
func = lambda x: format_func(x) + unit_string
def do_format(val):
            # Check if value is not nan to avoid ValueErrors when turning it into
            # a sexagesimal string.
if not np.isnan(val):
s = func(float(val))
if alwayssign and not s.startswith("-"):
s = "+" + s
if format == "latex" or format == "latex_inline":
s = f"${s}$"
return s
s = f"{val}"
return s
values = self.to_value(unit)
format_ufunc = np.vectorize(do_format, otypes=["U"])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
def _wrap_at(self, wrap_angle):
"""
        Implementation that assumes ``angle`` is already validated
        and that wrapping is done in place.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.view(np.ndarray)
# Do the wrapping, but only if any angles need to be wrapped
#
# Catch any invalid warnings from the floor division.
with np.errstate(invalid="ignore"):
wraps = (self_angle - wrap_angle_floor) // a360
valid = np.isfinite(wraps) & (wraps != 0)
if np.any(valid):
self_angle -= wraps * a360
# Rounding errors can cause problems.
self_angle[self_angle >= wrap_angle] -= a360
self_angle[self_angle < wrap_angle_floor] += a360
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `~astropy.coordinates.Angle`
object is wrapped in place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : angle-like
Specifies a single value for the wrap angle. This can be any
object that can initialize an `~astropy.coordinates.Angle` object,
e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `~astropy.coordinates.Angle`
Returns
-------
out : Angle or None
If ``inplace is False`` (default), return new
`~astropy.coordinates.Angle` object with angles wrapped accordingly.
Otherwise wrap in place and return `None`.
"""
wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle
if not inplace:
self = self.copy()
self._wrap_at(wrap_angle)
return None if inplace else self
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``.
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : angle-like or None
Specifies lower bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : angle-like or None
Specifies upper bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
def formatter(x):
return x.to_string(format=format)
return np.array2string(self, formatter={"all": formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format="latex")
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
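    For instance, ``Longitude(10 * u.deg) / 2`` comes back as a plain
    ``Angle`` of 5 deg rather than as a ``Longitude``.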
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
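    Examples
    --------
    A minimal sketch; values outside the allowed range raise `ValueError` as
    described above::
        >>> from astropy.coordinates import Latitude
        >>> import astropy.units as u
        >>> Latitude(45 * u.deg)
        <Latitude 45. deg>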
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself.
"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
# For speed, compare using "is", which is not strictly guaranteed to hold,
# but if it doesn't we'll just convert correctly in the 'else' clause.
if angles.unit is u.deg:
limit = 90
elif angles.unit is u.rad:
limit = 0.5 * np.pi
else:
limit = u.degree.to(angles.unit, 90.0)
# Ensure ndim>=1 so that comparison is done using the angle dtype.
# Otherwise, e.g., np.array(np.pi/2, 'f4') > np.pi/2 will yield True.
# (This feels like a bug -- see https://github.com/numpy/numpy/issues/23247)
# Note that we should avoid using `angles.dtype` directly since for
# structured arrays like Distribution this will be `void`.
angles_view = angles.view(np.ndarray)[np.newaxis]
invalid_angles = np.any(angles_view < -limit) or np.any(angles_view > limit)
if invalid_angles:
raise ValueError(
"Latitude angle(s) must be within -90 deg <= angle <= 90 deg, "
f"got {angles.to(u.degree)}"
)
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ("wrap_angle",)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
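    Examples
    --------
    A minimal sketch of the wrapping behaviour::
        >>> from astropy.coordinates import Longitude
        >>> import astropy.units as u
        >>> Longitude(370 * u.deg)
        <Longitude 10. deg>
        >>> lon = Longitude(355 * u.deg)
        >>> lon.wrap_angle = 180 * u.deg
        >>> lon
        <Longitude -5. deg>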
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError(
"A Longitude angle cannot be created from a Latitude angle."
)
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, "wrap_angle", self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, "_wrap_angle", self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
|
bee1c21706d21278bda1735d9ce7ff2773f11ecf8c10e59d92a4ec2b8e87339f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import abc
import inspect
from typing import TYPE_CHECKING, Any, TypeVar
import numpy as np
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.utils.decorators import classproperty
from astropy.utils.metadata import MetaData
from .connect import (
CosmologyFromFormat,
CosmologyRead,
CosmologyToFormat,
CosmologyWrite,
)
from .parameter import Parameter
if TYPE_CHECKING: # pragma: no cover
from collections.abc import Mapping
from astropy.cosmology.funcs.comparison import _FormatType
# Originally authored by Andrew Becker ([email protected]),
# and modified by Neil Crighton ([email protected]), Roban Kramer
# ([email protected]), and Nathaniel Starkman ([email protected]).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"]
__doctest_requires__ = {} # needed until __getattr__ removed
##############################################################################
# Parameters
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
# typing
_CosmoT = TypeVar("_CosmoT", bound="Cosmology")
_FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin")
##############################################################################
class CosmologyError(Exception):
pass
class Cosmology(metaclass=abc.ABCMeta):
"""Base-class for all Cosmologies.
Parameters
----------
*args
Arguments into the cosmology; used by subclasses, not this base class.
name : str or None (optional, keyword-only)
The name of the cosmology.
meta : dict or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
**kwargs
Arguments into the cosmology; used by subclasses, not this base class.
Notes
-----
Class instances are static -- you cannot (and should not) change the values
of the parameters. That is, all of the above attributes (except meta) are
read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
meta = MetaData()
# Unified I/O object interchange methods
from_format = UnifiedReadWriteMethod(CosmologyFromFormat)
to_format = UnifiedReadWriteMethod(CosmologyToFormat)
# Unified I/O read and write methods
read = UnifiedReadWriteMethod(CosmologyRead)
write = UnifiedReadWriteMethod(CosmologyWrite)
# Parameters
__parameters__: tuple[str, ...] = ()
__all_parameters__: tuple[str, ...] = ()
# ---------------------------------------------------------------
def __init_subclass__(cls):
super().__init_subclass__()
# -------------------
# Parameters
# Get parameters that are still Parameters, either in this class or above.
parameters = []
derived_parameters = []
for n in cls.__parameters__:
p = getattr(cls, n)
if isinstance(p, Parameter):
derived_parameters.append(n) if p.derived else parameters.append(n)
# Add new parameter definitions
for n, v in cls.__dict__.items():
if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
continue
derived_parameters.append(n) if v.derived else parameters.append(n)
# reorder to match signature
ordered = [
parameters.pop(parameters.index(n))
for n in cls._init_signature.parameters.keys()
if n in parameters
]
parameters = ordered + parameters # place "unordered" at the end
cls.__parameters__ = tuple(parameters)
cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)
# -------------------
# register as a Cosmology subclass
_COSMOLOGY_CLASSES[cls.__qualname__] = cls
@classproperty(lazy=True)
def _init_signature(cls):
"""Initialization signature (without 'self')."""
# get signature, dropping "self" by taking arguments [1:]
sig = inspect.signature(cls.__init__)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return sig
# ---------------------------------------------------------------
def __init__(self, name=None, meta=None):
self._name = str(name) if name is not None else name
self.meta.update(meta or {})
@property
def name(self):
"""The name of the Cosmology instance."""
return self._name
@property
@abc.abstractmethod
def is_flat(self):
"""
Return bool; `True` if the cosmology is flat.
This is abstract and must be defined in subclasses.
"""
raise NotImplementedError("is_flat is not implemented")
def clone(self, *, meta=None, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, so ``clone()``
cannot be used to change between flat and non-flat cosmologies.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
            returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
"""
# Quick return check, taking advantage of the Cosmology immutability.
if meta is None and not kwargs:
return self
# There are changed parameter or metadata values.
# The name needs to be changed accordingly, if it wasn't already.
_modname = self.name + " (modified)"
kwargs.setdefault("name", (_modname if self.name is not None else None))
# mix new meta into existing, preferring the former.
meta = meta if meta is not None else {}
new_meta = {**self.meta, **meta}
# Mix kwargs into initial arguments, preferring the former.
new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._init_signature.bind_partial(**new_init)
# Instantiate, respecting args vs kwargs
cloned = type(self)(*ba.args, **ba.kwargs)
# Check if nothing has changed.
# TODO! or should return self?
if (cloned.name == _modname) and not meta and cloned.is_equivalent(self):
cloned._name = self.name
return cloned
@property
def _init_arguments(self):
# parameters
kw = {n: getattr(self, n) for n in self.__parameters__}
# other info
kw["name"] = self.name
kw["meta"] = self.meta
return kw
# ---------------------------------------------------------------
# comparison methods
def is_equivalent(self, other: Any, /, *, format: _FormatType = False) -> bool:
r"""Check equivalence between Cosmologies.
Two cosmologies may be equivalent even if not the same class.
For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object to which to compare.
format : bool or None or str, optional keyword-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be
equivalent to a Cosmology.
`False` (default) will not allow conversion. `True` or `None` will,
and will use the auto-identification to try to infer the correct
format. A `str` is assumed to be the correct format to use when
converting.
``format`` is broadcast to match the shape of ``other``.
Note that the cosmology arguments are not broadcast against
``format``, so it cannot determine the output shape.
Returns
-------
bool
True if cosmologies are equivalent, False otherwise.
Examples
--------
Two cosmologies may be equivalent even if not of the same class.
        In this example, the ``LambdaCDM`` has ``Ode0`` set to the same value
calculated in ``FlatLambdaCDM``.
>>> import astropy.units as u
>>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
>>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo1.is_equivalent(cosmo2)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmo3.is_equivalent(cosmo2)
False
Also, using the keyword argument, the notion of equivalence is extended
to any Python object that can be converted to a |Cosmology|.
>>> from astropy.cosmology import Planck18
>>> tbl = Planck18.to_format("astropy.table")
>>> Planck18.is_equivalent(tbl, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be
checked with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of
these kinds can still be checked for equivalence, but the correct
format string must be used.
>>> tbl = Planck18.to_format("yaml")
>>> Planck18.is_equivalent(tbl, format="yaml")
True
"""
from .funcs import cosmology_equal
try:
return cosmology_equal(
self, other, format=(None, format), allow_equivalent=True
)
except Exception:
# `is_equivalent` allows `other` to be any object and returns False
# if `other` cannot be converted to a Cosmology, rather than
# raising an Exception.
return False
def __equiv__(self, other: Any, /) -> bool:
"""Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
            The object to which to compare.
Returns
-------
bool or `NotImplemented`
`NotImplemented` if ``other`` is from a different class.
`True` if ``other`` is of the same class and has matching parameters
and parameter values.
`False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__equiv__
# Check all parameters in 'other' match those in 'self' and 'other' has
# no extra parameters (latter part should never happen b/c same class)
return set(self.__all_parameters__) == set(other.__all_parameters__) and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
def __eq__(self, other: Any, /) -> bool:
"""Check equality between Cosmologies.
Checks the Parameters and immutable fields (i.e. not "meta").
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
            The object to which to compare.
Returns
-------
bool
`True` if Parameters and names are the same, `False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__eq__
eq = (
# non-Parameter checks: name
self.name == other.name
# check all parameters in 'other' match those in 'self' and 'other'
# has no extra parameters (latter part should never happen b/c same
# class) TODO! element-wise when there are array cosmologies
and set(self.__all_parameters__) == set(other.__all_parameters__)
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
)
return eq
# ---------------------------------------------------------------
def __repr__(self):
namelead = f"{self.__class__.__qualname__}("
if self.name is not None:
namelead += f'name="{self.name}", '
# nicely formatted parameters
fmtps = (f"{k}={getattr(self, k)}" for k in self.__parameters__)
return namelead + ", ".join(fmtps) + ")"
def __astropy_table__(self, cls, copy, **kwargs):
"""Return a `~astropy.table.Table` of type ``cls``.
Parameters
----------
cls : type
Astropy ``Table`` class or subclass.
copy : bool
Ignored.
**kwargs : dict, optional
Additional keyword arguments. Passed to ``self.to_format()``.
See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.
Returns
-------
`astropy.table.Table` or subclass instance
Instance of type ``cls``.
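        Examples
        --------
        A short sketch; this hook is what lets a cosmology be passed directly
        to a table constructor::
            >>> from astropy.cosmology import Planck18
            >>> from astropy.table import QTable
            >>> tbl = QTable(Planck18)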
"""
return self.to_format("astropy.table", cls=cls, **kwargs)
class FlatCosmologyMixin(metaclass=abc.ABCMeta):
"""
Mixin class for flat cosmologies. Do NOT instantiate directly.
Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatCosmologyMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
__all_parameters__: tuple[str, ...]
__parameters__: tuple[str, ...]
def __init_subclass__(cls: type[_FlatCosmoT]) -> None:
super().__init_subclass__()
# Determine the non-flat class.
# This will raise a TypeError if the MRO is inconsistent.
cls.__nonflatclass__
# ===============================================================
@classmethod # TODO! make metaclass-method
def _get_nonflat_cls(
cls, kls: type[_CosmoT] | None = None
) -> type[Cosmology] | None:
"""Find the corresponding non-flat class.
The class' bases are searched recursively.
Parameters
----------
kls : :class:`astropy.cosmology.Cosmology` class or None, optional
If `None` (default) this class is searched instead of `kls`.
Raises
------
TypeError
If more than one non-flat class is found at the same level of the
inheritance. This is similar to the error normally raised by Python
for an inconsistent method resolution order.
Returns
-------
type
A :class:`Cosmology` subclass this class inherits from that is not a
:class:`FlatCosmologyMixin` subclass.
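        Examples
        --------
        A short sketch::
            >>> from astropy.cosmology import FlatLambdaCDM, LambdaCDM
            >>> FlatLambdaCDM.__nonflatclass__ is LambdaCDM
            True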
"""
_kls = cls if kls is None else kls
# Find non-flat classes
nonflat: set[type[Cosmology]]
nonflat = {
b
for b in _kls.__bases__
if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)
}
if not nonflat: # e.g. subclassing FlatLambdaCDM
nonflat = {
k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None
}
if len(nonflat) > 1:
raise TypeError(
"cannot create a consistent non-flat class resolution order "
f"for {_kls} with bases {nonflat} at the same inheritance level."
)
if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin)
return None
return nonflat.pop()
__nonflatclass__ = classproperty(
_get_nonflat_cls, lazy=True, doc="Return the corresponding non-flat class."
)
# ===============================================================
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
@abc.abstractmethod
def nonflat(self: _FlatCosmoT) -> _CosmoT:
"""Return the equivalent non-flat-class instance of this cosmology."""
def clone(self, *, meta: Mapping | None = None, to_nonflat: bool = False, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
            returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
        The keyword 'to_nonflat' can be used to clone to the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
"""
if to_nonflat:
return self.nonflat.clone(meta=meta, **kwargs)
return super().clone(meta=meta, **kwargs)
# ===============================================================
def __equiv__(self, other):
"""flat-|Cosmology| equivalence.
Use `astropy.cosmology.funcs.cosmology_equal` with
``allow_equivalent=True`` for actual checks!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object to which to compare for equivalence.
Returns
-------
bool or `NotImplemented`
            `True` if ``other`` is of the same class or its non-flat counterpart
            (e.g. |FlatLambdaCDM| and |LambdaCDM|) and has matching parameters
            and parameter values.
`False` if ``other`` is of the same class but has different
parameters.
`NotImplemented` otherwise.
"""
if isinstance(other, FlatCosmologyMixin):
return super().__equiv__(other) # super gets from Cosmology
        # Check if `other` is the non-flat version of this class. This makes
        # the assumption that any further subclass of a flat cosmology keeps
        # the same physics.
if not issubclass(other.__class__, self.__nonflatclass__):
return NotImplemented
        # Check whether the parameters are equivalent: all parameters in
        # `other` match those in `self` and `other` has no extra parameters.
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
# equal
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__parameters__
)
# flatness check
and other.is_flat
)
return params_eq
# -----------------------------------------------------------------------------
def __getattr__(attr):
from . import flrw
if hasattr(flrw, attr) and attr not in ("__path__",):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and "
f"should be imported as ``from astropy.cosmology import {attr}``."
" In future this will raise an exception.",
AstropyDeprecationWarning,
)
return getattr(flrw, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
48388fad9c881b092b77e62be86961684094e00c8601a7d1d8d83c0ee8afa20a | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.stats.circstats import (
_length,
circcorrcoef,
circmean,
circmoment,
circvar,
rayleightest,
vonmisesmle,
vtest,
)
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test__length():
# testing against R CircStats package
# Ref. [1] pages 6 and 125
weights = np.array([12, 1, 6, 1, 2, 1, 1])
answer = 0.766282
data = np.array([0, 3.6, 36, 72, 108, 169.2, 324]) * u.deg
assert_allclose(answer, _length(data, weights=weights), atol=1e-4)
def test_circmean():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358]) * u.deg
answer = 48.63 * u.deg
assert_equal(answer, np.around(circmean(data), 2))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_circmean_against_scipy():
import scipy.stats
# testing against scipy.stats.circmean function
# the data is the same as the test before, but in radians
data = np.array(
[0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207, 6.24827872]
)
answer = scipy.stats.circmean(data)
assert_equal(np.around(answer, 2), np.around(circmean(data), 2))
def test_circvar():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358]) * u.deg
answer = 0.1635635
assert_allclose(answer, circvar(data), atol=1e-4)
def test_circmoment():
# testing against R CircStats package
# Ref[1], page 23
data = np.array([51, 67, 40, 109, 31, 358]) * u.deg
# 2nd, 3rd, and 4th moments
# this is the answer given in Ref[1] in radians
answer = np.array([1.588121, 1.963919, 2.685556])
answer = np.around(np.rad2deg(answer) * u.deg, 4)
result = (
np.around(circmoment(data, p=2)[0], 4),
np.around(circmoment(data, p=3)[0], 4),
np.around(circmoment(data, p=4)[0], 4),
)
assert_equal(answer[0], result[0])
assert_equal(answer[1], result[1])
assert_equal(answer[2], result[2])
# testing lengths
answer = np.array([0.4800428, 0.236541, 0.2255761])
assert_allclose(
answer,
(circmoment(data, p=2)[1], circmoment(data, p=3)[1], circmoment(data, p=4)[1]),
atol=1e-4,
)
def test_circcorrcoef():
# testing against R CircStats package
# Ref[1], page 180
# fmt: off
alpha = np.array(
[
356, 97, 211, 232, 343, 292, 157, 302, 335, 302, 324,
85, 324, 340, 157, 238, 254, 146, 232, 122, 329,
]
) * u.deg
beta = np.array(
[
119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94, 45,
47, 108, 221, 270, 119, 248, 270, 45, 23,
]
) * u.deg
# fmt: on
answer = 0.2704648
assert_allclose(answer, circcorrcoef(alpha, beta), atol=1e-4)
def test_rayleightest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36]) * u.deg
# answer was obtained through R CircStats function r.test(x)
answer = (0.00640418, 0.9202565)
result = (rayleightest(data), _length(data))
assert_allclose(answer[0], result[0], atol=1e-4)
assert_allclose(answer[1], result[1], atol=1e-4)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_vtest():
# testing against R CircStats package
data = np.array([190.18, 175.48, 155.95, 217.83, 156.36]) * u.deg
# answer was obtained through R CircStats function v0.test(x)
answer = 0.9994725
assert_allclose(answer, vtest(data), atol=1e-5)
def test_vonmisesmle():
# testing against R CircStats package
# testing non-Quantity
# fmt: off
data = np.array(
[
3.3699057, 4.0411630, 0.5014477, 2.6223103, 3.7336524,
1.8136389, 4.1566039, 2.7806317, 2.4672173, 2.8493644,
]
)
# fmt: on
# answer was obtained through R CircStats function vm.ml(x)
answer = (3.006514, 1.474132)
assert_allclose(answer[0], vonmisesmle(data)[0], atol=1e-5)
assert_allclose(answer[1], vonmisesmle(data)[1], atol=1e-5)
# testing with Quantity
data = np.rad2deg(data) * u.deg
answer = np.rad2deg(3.006514) * u.deg
assert_equal(np.around(answer, 3), np.around(vonmisesmle(data)[0], 3))
# testing for weighted vonmisesmle
data = np.array(
[
np.pi / 2,
np.pi,
np.pi / 2,
]
    )  # this data has np.pi / 2 twice as often as np.pi
# get answer using astropy vonmisesmle to test
answer = vonmisesmle(data)
data_to_weigh = np.array([np.pi / 2, np.pi])
weights = [2, 1]
assert_allclose(
answer[0], vonmisesmle(data_to_weigh, weights=weights)[0], atol=1e-5
)
assert_allclose(
answer[1], vonmisesmle(data_to_weigh, weights=weights)[1], atol=1e-5
)
# testing for axis argument (stacking the data from the first test)
data = np.array(
[
[
3.3699057,
4.0411630,
0.5014477,
2.6223103,
3.7336524,
1.8136389,
4.1566039,
2.7806317,
2.4672173,
2.8493644,
],
[
3.3699057,
4.0411630,
0.5014477,
2.6223103,
3.7336524,
1.8136389,
4.1566039,
2.7806317,
2.4672173,
2.8493644,
],
]
)
# answer should be duplicated
answer = (np.array([3.006514, 3.006514]), np.array([1.474132, 1.474132]))
assert_allclose(answer[0], vonmisesmle(data, axis=1)[0], atol=1e-5)
assert_allclose(answer[1], vonmisesmle(data, axis=1)[1], atol=1e-5)
# same test for Quantity
data = np.rad2deg(data) * u.deg
answer = (np.rad2deg(answer[0]) * u.deg, answer[1])
assert_allclose(answer[0], vonmisesmle(data, axis=1)[0], atol=1e-5)
assert_allclose(answer[1], vonmisesmle(data, axis=1)[1], atol=1e-5)
|
2bb9537a9bfe662d248fd3e33ad3dd8bdb65b7a362f272d4bae625c3b3df3df7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to using quantities/units on parameters of models.
"""
import numpy as np
import pytest
from astropy import coordinates as coord
from astropy import units as u
from astropy.modeling.core import Fittable1DModel, InputParameterError
from astropy.modeling.models import (
Const1D,
Gaussian1D,
Pix2Sky_TAN,
RotateNative2Celestial,
Rotation2D,
BlackBody,
)
from astropy.modeling.parameters import Parameter, ParameterDefinitionError
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
class BaseTestModel(Fittable1DModel):
@staticmethod
def evaluate(x, a):
return x
def test_parameter_quantity():
"""
Basic tests for initializing general models (that do not require units)
with parameters that have units attached.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
assert g.amplitude.value == 1.0
assert g.amplitude.unit is u.J
assert g.mean.value == 1.0
assert g.mean.unit is u.m
assert g.stddev.value == 0.1
assert g.stddev.unit is u.m
def test_parameter_set_quantity():
"""
Make sure that parameters that start off as quantities can be set to any
other quantity, regardless of whether the units of the new quantity are
compatible with the original ones.
We basically leave it up to the evaluate method to raise errors if there
are issues with incompatible units, and we don't check for consistency
at the parameter level.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Try equivalent units
g.amplitude = 4 * u.kJ
assert_quantity_allclose(g.amplitude, 4 * u.kJ)
g.mean = 3 * u.km
assert_quantity_allclose(g.mean, 3 * u.km)
g.stddev = 2 * u.mm
assert_quantity_allclose(g.stddev, 2 * u.mm)
# Try different units
g.amplitude = 2 * u.s
assert_quantity_allclose(g.amplitude, 2 * u.s)
g.mean = 2 * u.Jy
assert_quantity_allclose(g.mean, 2 * u.Jy)
def test_parameter_lose_units():
"""
Check that parameters that have been set to a quantity that are then set to
a value with no units raise an exception. We do this because setting a
parameter to a value with no units is ambiguous if units were set before:
    if a parameter is 1 * u.Jy and the parameter is then set to 2, does this mean
    2 without units, or 2 * u.Jy?
"""
g = Gaussian1D(1 * u.Jy, 3, 0.1)
MESSAGE = (
r"The .* parameter should be given as a .* because it was originally"
r" initialized as a .*"
)
with pytest.raises(UnitsError, match=MESSAGE):
g.amplitude = 2
def test_parameter_add_units():
"""
On the other hand, if starting from a parameter with no units, we should be
able to add units since this is unambiguous.
"""
g = Gaussian1D(1, 3, 0.1)
g.amplitude = 2 * u.Jy
assert_quantity_allclose(g.amplitude, 2 * u.Jy)
def test_parameter_change_unit():
"""
Test that changing the unit on a parameter does not work. This is an
ambiguous operation because it's not clear if it means that the value should
be converted or if the unit should be changed without conversion.
"""
g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
# Setting a unit on a unitless parameter should not work
MESSAGE = (
r"Cannot attach units to parameters that were not initially specified with"
r" units"
)
with pytest.raises(ValueError, match=MESSAGE):
g.amplitude.unit = u.Jy
    # Changing to another unit should not work either, even if it is an equivalent unit
MESSAGE = (
r"Cannot change the unit attribute directly, instead change the parameter to a"
r" new quantity"
)
with pytest.raises(ValueError, match=MESSAGE):
g.mean.unit = u.cm
def test_parameter_set_value():
"""
Test that changing the value on a parameter works as expected.
"""
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
# To set a parameter to a quantity, we simply do
g.amplitude = 2 * u.Jy
# If we try setting the value, we need to pass a non-quantity value
# TODO: determine whether this is the desired behavior?
g.amplitude.value = 4
assert_quantity_allclose(g.amplitude, 4 * u.Jy)
assert g.amplitude.value == 4
assert g.amplitude.unit is u.Jy
# If we try setting it to a Quantity, we raise an error
MESSAGE = (
r"The .value property on parameters should be set to unitless values, not"
r" Quantity objects.*"
)
with pytest.raises(TypeError, match=MESSAGE):
g.amplitude.value = 3 * u.Jy
def test_parameter_quantity_property():
"""
Test that the quantity property of Parameters behaves as expected
"""
# Since parameters have a .value and .unit parameter that return just the
# value and unit respectively, we also have a .quantity parameter that
# returns a Quantity instance.
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)
# Setting a parameter to a quantity changes the value and the default unit
g.amplitude.quantity = 5 * u.mJy
assert g.amplitude.value == 5
assert g.amplitude.unit is u.mJy
# And we can also set the parameter to a value with different units
g.amplitude.quantity = 4 * u.s
assert g.amplitude.value == 4
assert g.amplitude.unit is u.s
# But not to a value without units
MESSAGE = r"The .quantity attribute should be set to a Quantity object"
with pytest.raises(TypeError, match=MESSAGE):
g.amplitude.quantity = 3
def test_parameter_default_units_match():
# If the unit and default quantity units are different, raise an error
MESSAGE = (
r"parameter default 1.0 m does not have units equivalent to the required"
r" unit Jy"
)
with pytest.raises(ParameterDefinitionError, match=MESSAGE):
class TestC(Fittable1DModel):
a = Parameter(default=1.0 * u.m, unit=u.Jy)
@pytest.mark.parametrize(("unit", "default"), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
"""
Test that default quantities are correctly taken into account
"""
class TestModel(BaseTestModel):
a = Parameter(default=default, unit=unit)
# TODO: decide whether the default property should return a value or
# a quantity?
# The default unit and value should be set on the class
assert TestModel.a.unit == u.m
assert TestModel.a.default == 1.0
# Check that the default unit and value are also set on a class instance
m = TestModel()
assert m.a.unit == u.m
assert m.a.default == m.a.value == 1.0
# If the parameter is set to a different value, the default is still the
# internal default
m = TestModel(2.0 * u.m)
assert m.a.unit == u.m
assert m.a.value == 2.0
assert m.a.default == 1.0
# Instantiate with a different, but compatible unit
m = TestModel(2.0 * u.pc)
assert m.a.unit == u.pc
assert m.a.value == 2.0
# The default is still in the original units
# TODO: but how do we know what those units are if we don't return a
# quantity?
assert m.a.default == 1.0
# Initialize with a completely different unit
m = TestModel(2.0 * u.Jy)
assert m.a.unit == u.Jy
assert m.a.value == 2.0
# TODO: this illustrates why the default doesn't make sense anymore
assert m.a.default == 1.0
    # Instantiating with a plain number (no units) raises an error, since the
    # parameter requires a Quantity
MESSAGE = r".* requires a Quantity for parameter .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TestModel(1.0)
def test_parameter_quantity_arithmetic():
"""
Test that arithmetic operations with properties that have units return the
appropriate Quantities.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Addition should work if units are compatible
assert g.mean + (1 * u.m) == 2 * u.m
assert (1 * u.m) + g.mean == 2 * u.m
# Multiplication by a scalar should also preserve the quantity-ness
assert g.mean * 2 == (2 * u.m)
assert 2 * g.mean == (2 * u.m)
# Multiplication by a quantity should result in units being multiplied
assert g.mean * (2 * u.m) == (2 * (u.m**2))
assert (2 * u.m) * g.mean == (2 * (u.m**2))
# Negation should work properly too
assert -g.mean == (-1 * u.m)
assert abs(-g.mean) == g.mean
# However, addition of a quantity + scalar should not work
MESSAGE = (
r"Can only apply 'add' function to dimensionless quantities when other"
r" argument .*"
)
with pytest.raises(UnitsError, match=MESSAGE):
g.mean + 1
with pytest.raises(UnitsError, match=MESSAGE):
1 + g.mean
def test_parameter_quantity_comparison():
"""
Basic test of comparison operations on properties with units.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Essentially here we are checking that parameters behave like Quantity
assert g.mean == 1 * u.m
assert 1 * u.m == g.mean
assert g.mean != 1
assert 1 != g.mean
assert g.mean < 2 * u.m
assert 2 * u.m > g.mean
MESSAGE = (
r"Can only apply 'less' function to dimensionless quantities when other"
r" argument .*"
)
with pytest.raises(UnitsError, match=MESSAGE):
g.mean < 2
with pytest.raises(UnitsError, match=MESSAGE):
2 > g.mean
g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
assert np.all(g.mean == [1, 2] * u.m)
assert np.all([1, 2] * u.m == g.mean)
assert np.all(g.mean != [1, 2])
assert np.all([1, 2] != g.mean)
with pytest.raises(UnitsError, match=MESSAGE):
g.mean < [3, 4]
with pytest.raises(UnitsError, match=MESSAGE):
[3, 4] > g.mean
def test_parameters_compound_models():
Pix2Sky_TAN()
sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
lon_pole = 180 * u.deg
n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
rot = Rotation2D(23)
rot | n2c
def test_magunit_parameter():
"""Regression test for bug reproducer in issue #13133"""
unit = u.ABmag
c = -20.0 * unit
model = Const1D(c)
assert model(-23.0 * unit) == c
def test_log_getter():
"""Regression test for issue #14511"""
x = 6000 * u.AA
mdl_base = BlackBody(temperature=5000 * u.K, scale=u.Quantity(1))
class CustomBlackBody(BlackBody):
scale = Parameter(
"scale",
default=1,
bounds=(0, None),
getter=np.log,
setter=np.exp,
unit=u.dimensionless_unscaled,
)
mdl = CustomBlackBody(temperature=5000 * u.K, scale=u.Quantity(np.log(1)))
assert mdl.scale == np.log(1)
assert_quantity_allclose(mdl(x), mdl_base(x))
def test_sqrt_getter():
"""Regression test for issue #14511"""
x = 1 * u.m
mdl_base = Gaussian1D(mean=32 * u.m, stddev=3 * u.m)
class CustomGaussian1D(Gaussian1D):
mean = Parameter(
"mean",
default=1 * u.m,
bounds=(0, None),
getter=np.sqrt,
setter=np.square,
unit=u.m,
)
stddev = Parameter(
"stddev",
default=1 * u.m,
bounds=(0, None),
getter=np.sqrt,
setter=np.square,
unit=u.m,
)
mdl = CustomGaussian1D(mean=np.sqrt(32 * u.m), stddev=np.sqrt(3 * u.m))
assert mdl.mean == np.sqrt(32 * u.m)
assert (
mdl.mean._internal_value == np.sqrt(32) ** 2
) # numerical inaccuracy results in 32.00000000000001
assert mdl.mean._internal_unit == u.m
assert mdl.stddev == np.sqrt(3 * u.m)
assert (
mdl.stddev._internal_value == np.sqrt(3) ** 2
) # numerical inaccuracy results in 3.0000000000000004
assert mdl.stddev._internal_unit == u.m
assert_quantity_allclose(mdl(x), mdl_base(x))
|
7b227494173713593c812ce36ba1099378a2ec999d9e37a80ff676d859f50468 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import functools
import itertools
import unittest.mock as mk
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel, Model
from astropy.modeling.parameters import (
InputParameterError,
Parameter,
_tofloat,
param_repr_oneline,
)
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def do_something(self, v):
pass
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc) ** 2 + (y - yc) ** 2
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name="alpha", default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
MESSAGE = r"Parameter of .* could not be converted to float"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat("test")
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# dimensions/scalar array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
MESSAGE = r"Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(True)
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(False)
# other
class Value:
pass
MESSAGE = r"Don't know how to convert parameter of .* to float"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(Value)
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter("alpha", default=1)
assert p.name == "alpha"
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = "beta"
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter("alpha", default=42)
num = 42.0
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par**val == num**val
assert val**par == val**num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.0)
m1b = Parameter(default=5.0)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.0)
class M3(M2):
m3d = Parameter(default=20.0)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.0
assert mod.m1b == 5.0
assert mod.m2c == 11.0
assert mod.m3d == 20.0
for key in ["m1a", "m1b", "m2c", "m3d"]:
assert key in mod.__dict__
assert mod.param_names == ("m1a", "m1b", "m2c", "m3d")
def test_param_metric():
mod = M3()
assert mod._param_metrics["m1a"]["slice"] == slice(0, 1)
assert mod._param_metrics["m1b"]["slice"] == slice(1, 2)
assert mod._param_metrics["m2c"]["slice"] == slice(2, 3)
assert mod._param_metrics["m3d"]["slice"] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1.0, 5.0, 11.0, 20], dtype=np.float64)).all()
class TestParameters:
def setup_class(self):
"""
Unit tests for parameters.
Read an IRAF database file created by onedspec.identify. Use the
information to create a 1D Chebyshev model and perform the same fit.
Also create a Gaussian model.
"""
test_file = get_pkg_data_filename("data/idcompspec.fits")
f = open(test_file)
lines = f.read()
reclist = lines.split("begin")
f.close()
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields["order"])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30.0, 40.0, 50.0, 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3.0, 4.0, 5.0, 6.0, 7.0]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0.0, 0.0, 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0.0, 0.0, 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.0
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
MESSAGE = (
r"Input parameter values not compatible with the model parameters array: .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
MESSAGE = (
r"Value for parameter c0 does not match shape or size\nexpected by model .*"
r" vs .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
MESSAGE = (
r"Value for parameter amplitude does not match shape or size\nexpected by"
r" model .* vs .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test that the fitter modifies model.parameters.
Uses an IRAF example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array(
[
4826.1066602783685,
952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317,
]
),
rtol=10 ** (-2),
)
def testPolynomial1D(self):
d = {"c0": 11, "c1": 12, "c2": 13, "c3": 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(
p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(
p1.parameters, [10.0, 10.0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(
p1.parameters, [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {
"c0_0": [2, 3],
"c1_0": [1, 2],
"c2_0": [4, 5],
"c0_1": [1, 1],
"c0_2": [2, 2],
"c1_1": [5, 5],
}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5, 1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
MESSAGE = r"bounds may not be specified simultaneously with min or max .*"
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), min=1, name="test")
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), max=2, name="test")
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), min=1, max=2, name="test")
# Setters
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
MESSAGE = "{} value must be a number or a Quantity"
with pytest.raises(TypeError, match=MESSAGE.format("Min")):
param.bounds = ("test", None)
with pytest.raises(TypeError, match=MESSAGE.format("Max")):
param.bounds = (None, "test")
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name="test", default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
MESSAGE = r"Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError, match=MESSAGE):
param[slice(0, 0)] = 2
MESSAGE = r"Input dimension 3 invalid for 'test' parameter with dimension 1"
with pytest.raises(InputParameterError, match=MESSAGE):
param[3] = np.array([5])
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name="test", default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
MESSAGE = r"Cannot attach units to parameters that were .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.m)
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# Force magnitude unit (mag=False)
MESSAGE = r"This parameter does not support the magnitude units such as .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.ABmag, True)
# Force magnitude unit (mag=True)
param._mag = True
param._set_unit(u.ABmag, True)
assert param._unit == u.ABmag
# No force Error (existing unit)
MESSAGE = r"Cannot change the unit attribute directly, instead change the .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.K)
def test_quantity(self):
param = Parameter(name="test", default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name="test", default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
MESSAGE = r"cannot reshape array of size 4 into shape .*"
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name="test", default=1)
assert param.shape == ()
# Reshape error
MESSAGE = r"Cannot assign this shape to a scalar quantity"
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
param.shape = (1,)
# single value
param = Parameter(name="test", default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
param.shape = ()
def test_size(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name="test", default=[1])
assert param.size == 1
param = Parameter(name="test", default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.std is None
assert param._std is None
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.fixed is False
assert param._fixed is False
# Set error
MESSAGE = r"Value must be boolean"
with pytest.raises(ValueError, match=MESSAGE):
param.fixed = 3
# Set
param.fixed = True
assert param.fixed is True
assert param._fixed is True
def test_tied(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.tied is False
assert param._tied is False
# Set error
MESSAGE = r"Tied must be a callable or set to False or None"
with pytest.raises(TypeError, match=MESSAGE):
param.tied = mk.NonCallableMagicMock()
# Set None
param.tied = None
assert param.tied is None
assert param._tied is None
# Set False
param.tied = False
assert param.tied is False
assert param._tied is False
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
MESSAGE = r"This decorator method expects a callable.*"
with pytest.raises(ValueError, match=MESSAGE):
param.validator(mk.NonCallableMagicMock())
def test_validate(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
def test_model(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.model is None
assert param._model is None
assert param._model_required is False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter0, getter0]
) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0),
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = [9, 10, 11, 12]
getter1.return_value = [9, 10, 11, 12]
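# With _model_required set, assigning .model pushes the value through the new
# (mocked) wrappers, so param.value now reflects their return value, as the
# asserts below check.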
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter1, getter1]
) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1),
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter1, getter1]
) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1),
]
assert param._value is None
def test_raw_value(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
# Bad ufunc
MESSAGE = r"A numpy.ufunc used for Parameter getter/setter .*"
with pytest.raises(TypeError, match=MESSAGE):
param._create_value_wrapper(np.add, mk.MagicMock())
# Good ufunc
with mk.patch(
"astropy.modeling.parameters._wrap_ufunc", autospec=True
) as mkWrap:
assert (
param._create_value_wrapper(np.negative, mk.MagicMock())
== mkWrap.return_value
)
assert mkWrap.call_args_list == [mk.call(np.negative)]
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
# wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required is False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required is True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, "partial", autospec=True) as mkPartial:
assert (
param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
)
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
MESSAGE = r"Parameter getter/setter must be a function .*"
with pytest.raises(TypeError, match=MESSAGE):
param._create_value_wrapper(wrapper3, mk.MagicMock())
def test_bool(self):
# single value is true
param = Parameter(name="test", default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name="test", default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name="test", default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name="test", default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name="test", default=1)
assert param_repr_oneline(param) == "1."
# Vector value no units
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param_repr_oneline(param) == "[1., 2., 3., 4.]"
# Single value units
param = Parameter(name="test", default=1 * u.m)
assert param_repr_oneline(param) == "1. m"
# Vector value units
param = Parameter(name="test", default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == "[1., 2., 3., 4.] m"
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, 0.1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D(
[12, 10], [3.5, 5.2], stddev=[0.4, 0.7], n_models=2
)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[1.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to a single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[11.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13.0, 10.0])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9.0, 5.2])
class TestParameterInitialization:
"""
This suite of tests checks most, if not all, cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
# Not broadcastable
TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array(
[
[10, 20],
[30, 40],
[50, 60],
]
)
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(
t2.param_sets
== [
[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]],
]
)
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60, 1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(
t.param_sets[0]
== [
[[10, 20, 30], [40, 50, 60]],
]
)
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(
("p1", "p2"),
[
(1, 2),
(1, [2, 3]),
([1, 2], 3),
([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5]),
],
)
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError, match=r".*"):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize(
"kwargs",
[
{"n_models": 2},
{"model_set_axis": 0},
{"n_models": 2, "model_set_axis": 0},
],
)
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize(
"kwargs",
[
{"n_models": 2},
{"model_set_axis": 0},
{"n_models": 2, "model_set_axis": 0},
],
)
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(
t.param_sets
== [
[[10, 20], [30, 40]],
[[1, 2], [3, 4]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]], [[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(
t2.param_sets
== [
[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]],
]
)
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
# Can't broadcast different array shapes
TParModel(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]],
n_models=2,
)
t = TParModel(
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]], [[1, 2], [3, 4]], n_models=2
)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]], [[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel(
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
n_models=2,
)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
]
)
assert np.all(
t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4, 5, 6, 7, 8]
)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
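# rollaxis moves the model-set axis to the end: coeff becomes shape (2, 3, 2)
# and e becomes (3, 2), matching model_set_axis=-1 below.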
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(
t.param_sets[0]
== [
[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]],
]
)
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(
t.parameters
== [10, 50, 20, 60, 30, 70, 30, 70, 40, 80, 50, 90, 1, 3, 2, 4, 3, 5]
)
assert t.coeff.shape == (2, 3, 2) # note change in api
assert t.e.shape == (3, 2) # note change in api
def test_wrong_number_of_params(self):
MESSAGE = r"Inconsistent dimensions for parameter .* for 2 model sets.*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
MESSAGE = r"All parameter values must be arrays of dimension at .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
MESSAGE = r"All parameter values must be arrays of dimension at .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
MESSAGE = r"Inconsistent dimensions for parameter .* for 2 model sets.*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11), model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
Tests that, in a model with 3 parameters that do not all mutually broadcast,
the incompatibility is detected regardless of the order in which the
parameters are given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
MESSAGE = (
r"Parameter '.*' of shape .* cannot be broadcast with parameter '.*' of"
r" shape .*"
)
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError, match=MESSAGE):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
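# setter1 stores xc unchanged (-1), while setter2 stores yc * p = 3 * pi, so
# the model should evaluate to (x + 1)**2 + (y - 3*pi)**2.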
for x, y in pars:
np.testing.assert_almost_equal(model(x, y), (x + 1) ** 2 + (y - np.pi * 3) ** 2)
|
954df382e9e4fc25355d4aa0f83a66c5bde62102783b664991f131489bb69593 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# By default, tests should not use the internet.
from astropy.samp import SAMPWarning, conf
from astropy.samp.client import SAMPClient
from astropy.samp.hub import SAMPHubServer
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.integrated_client import SAMPIntegratedClient
def setup_module(module):
conf.use_internet = False
def test_SAMPHubProxy():
"""Test that SAMPHubProxy can be instantiated"""
SAMPHubProxy()
def test_SAMPClient():
"""Test that SAMPClient can be instantiated"""
proxy = SAMPHubProxy()
SAMPClient(proxy)
def test_SAMPIntegratedClient():
"""Test that SAMPIntegratedClient can be instantiated"""
SAMPIntegratedClient()
@pytest.fixture
def samp_hub(request):
"""A fixture that can be used by client tests that require a HUB."""
my_hub = SAMPHubServer()
my_hub.start()
request.addfinalizer(my_hub.stop)
def test_SAMPIntegratedClient_notify_all(samp_hub):
"""Test that SAMP returns a warning if no receiver got the message."""
client = SAMPIntegratedClient()
client.connect()
message = {"samp.mtype": "coverage.load.moc.fits"}
with pytest.warns(SAMPWarning):
client.notify_all(message)
client.disconnect()
def test_reconnect(samp_hub):
"""Test that SAMPIntegratedClient can reconnect.
This is a regression test for bug [#2673]
https://github.com/astropy/astropy/issues/2673
"""
my_client = SAMPIntegratedClient()
my_client.connect()
my_client.disconnect()
my_client.connect()
|
f190e1a623f64bf4e8579ced024b3b86e898c3ff0fdb1a6ebec1e0d3b9322ba4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from contextlib import nullcontext
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.contour import QuadContourSet
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame,
RectangularFrame,
RectangularFrame1D,
)
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
# We cannot use matplotlib.checkdep_usetex() anymore, see
# https://github.com/matplotlib/matplotlib/issues/23244
TEX_UNAVAILABLE = True
MATPLOTLIB_LT_3_7 = Version(matplotlib.__version__) < Version("3.7")
def teardown_function(function):
plt.close("all")
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc("axes", grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmp_path):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
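# All-sky Mollweide (MOL) header reused by several tests below; the reference
# pixel lies outside the 200x100 array, so some pixels fall outside the valid
# sky area.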
TARGET_HEADER = fits.Header.fromstring(
"""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""",
sep="\n",
)
@pytest.mark.parametrize("grid_type", ["lines", "contours"])
def test_no_numpy_warnings(ignore_matplotlibrc, tmp_path, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color="white", grid_type=grid_type)
# There should be no warnings raised if some pixels are outside the WCS
# (since this is normal).
# But our own catch_warnings context was previously ignoring some warnings,
# so now we have to filter them explicitly; otherwise the pytest
# filterwarnings=error setting in setup.cfg would fail this test.
# There are actually multiple warnings, but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=r".*converting a masked element to nan.*"
)
warnings.filterwarnings(
"ignore", message=r".*No contour levels were found within the data range.*"
)
warnings.filterwarnings(
"ignore", message=r".*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*"
)
warnings.filterwarnings(
"ignore", message=r".*PY_SSIZE_T_CLEAN will be required.*"
)
fig.savefig(tmp_path / "test.png")
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError, match=r"Frame banana not found"):
ax.get_coords_overlay("banana")
with pytest.raises(ValueError, match=r"Unknown frame: banana"):
get_coord_meta("banana")
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, "o", transform=ax.get_transform("galactic"))
def test_scatter_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.scatter_coord(c, marker="o", transform=ax.get_transform("galactic"))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel("Test x label", labelpad=2, color="red")
ax.set_ylabel("Test y label", labelpad=3, color="green")
assert ax.coords[0].axislabels.get_text() == "Test x label"
assert ax.coords[0].axislabels.get_minpad("b") == 2
assert ax.coords[0].axislabels.get_color() == "red"
assert ax.coords[1].axislabels.get_text() == "Test y label"
assert ax.coords[1].axislabels.get_minpad("l") == 3
assert ax.coords[1].axislabels.get_color() == "green"
assert ax.get_xlabel() == "Test x label"
assert ax.get_ylabel() == "Test y label"
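# 3D Galactic cube header (distance modulus, GLON, GLAT) used by the slicing
# and repr tests below.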
GAL_HEADER = fits.Header.fromstring(
"""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""",
sep="\n",
)
def test_slicing_warnings(ignore_matplotlibrc, tmp_path):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
plt.savefig(tmp_path / "test.png")
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.clf()
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 2))
plt.savefig(tmp_path / "test.png")
def test_plt_xlabel_ylabel(tmp_path):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel("Galactic Longitude")
plt.ylabel("Galactic Latitude")
plt.savefig(tmp_path / "test.png")
def test_grid_type_contours_transform(tmp_path):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {
"type": ("scalar", "scalar"),
"unit": (u.m, u.s),
"wrap": (None, None),
"name": ("x", "y"),
}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type="contours")
fig.savefig(tmp_path / "test.png")
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmp_path):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmp_path / "test.png"
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
if MATPLOTLIB_LT_3_7:
ctx = pytest.warns(
UserWarning, match="No contour levels were found within the data range"
)
else:
ctx = nullcontext()
with ctx:
ax.contour(np.zeros((4, 4)), transform=ax.get_transform("world"))
def test_iterate_coords(ignore_matplotlibrc):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
with pytest.raises(
ValueError,
match=r"WCS has more than 2 pixel dimensions, so 'slices' should be set",
):
plt.subplot(1, 1, 1, projection=wcs3d)
with pytest.raises(
ValueError,
match=(
r"'slices' should have as many elements as WCS has pixel dimensions .should"
r" be 3."
),
):
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1, 2))
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ["x", "y"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("x", "y"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("y", "x"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=["x", "y"])
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "x"))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ["x"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "y"))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type ... wrap format_unit visible
----- ------------------------------ --------- ... --------- ----------- -------
0 distmod dist scalar ... None no
1 pos.galactic.lon glon-car glon longitude ... 360.0 deg deg yes
2 pos.galactic.lat glat-car glat latitude ... None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type ... wrap format_unit visible
----- ------------------------------ --------- ... --------- ----------- -------
0 distmod dist scalar ... None yes
1 pos.galactic.lon glon-car glon longitude ... 360.0 deg deg yes
2 pos.galactic.lat glat-car glat latitude ... None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, "x", "y"))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["FREQ", "TIME"]
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif(TEX_UNAVAILABLE, reason="TeX is unavailable")
def test_simplify_labels_usetex(ignore_matplotlibrc, tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc("text", usetex=True)
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---MOL",
"CTYPE2": "DEC--MOL",
"RADESYS": "ICRS",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header["NAXIS1"] - 0.5)
ax.set_ylim(-0.5, header["NAXIS2"] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmp_path / "plot.png")
@pytest.mark.parametrize("frame_class", [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ["RA", "Declination"]
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---AIT",
"CTYPE2": "DEC--AIT",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize("atol", [0.2, 1.0e-8])
def test_bbox_size(atol):
# Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
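# Reference tight-bbox extents in display (pixel) coordinates; exact text
# metrics depend on the FreeType version, so the strict comparison
# (atol=1e-8) is only enforced with FreeType 2.6.1 (see the xfail below).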
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail(
"Exact BoundingBox dimensions are only ensured with FreeType 2.6.1"
)
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
high_wcs = HighLevelWCSWrapper(sliced_wcs)
ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmp_path):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color="black", grid_type="contours")
fig.savefig(tmp_path / "plot.png")
fig.savefig(tmp_path / "plot.png")
def test_get_coord_range_nan_regression():
# Test to make sure there is no internal casting of NaN to integers
# NumPy 1.24 raises a RuntimeWarning if a NaN is cast to an integer
wcs = WCS(TARGET_HEADER)
wcs.wcs.crval[0] = 0 # Re-position the longitude wrap to the middle
ax = plt.subplot(1, 1, 1, projection=wcs)
# Set the Y limits within valid latitudes/declinations
ax.set_ylim(300, 500)
# Set the X limits within valid longitudes/RAs, so the world coordinates have no NaNs
ax.set_xlim(300, 700)
assert np.allclose(
ax.coords.get_coord_range(),
np.array(
[
(-123.5219272110385, 122.49684897692201),
(-44.02289164685554, 44.80732766607591),
]
),
)
# Extend the X limits to include invalid longitudes/RAs, so the world coordinates have NaNs
ax.set_xlim(0, 700)
assert np.allclose(
ax.coords.get_coord_range(),
np.array(
[(-131.3193386797236, 180.0), (-44.02289164685554, 44.80732766607591)]
),
)
def test_imshow_error():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
with pytest.raises(ValueError, match="Cannot use images with origin='upper"):
ax.imshow(np.ones(100).reshape(10, 10), origin="upper")
def test_label_setting():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
# Check both xlabel and label kwargs work
ax.set_xlabel(xlabel="label")
ax.set_xlabel(label="label")
# Check no label errors:
with pytest.raises(
TypeError, match=r"set_xlabel\(\) missing 1 required positional argument"
):
ax.set_xlabel()
# Check both xlabel and label kwargs work
ax.set_ylabel(ylabel="label")
ax.set_ylabel(label="label")
# Check no label errors:
with pytest.raises(
TypeError, match=r"set_ylabel\(\) missing 1 required positional argument"
):
ax.set_ylabel()
def test_invisible_bbox():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
assert ax.get_tightbbox(fig.canvas.get_renderer()) is not None
ax.set_visible(False)
assert ax.get_tightbbox(fig.canvas.get_renderer()) is None
|
afcbce1fdf4b6d2976b3a330c84f7b8f5df829230794e40c2c859952c1f32434 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from matplotlib import rc_context
from numpy.testing import assert_almost_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.visualization.wcsaxes.formatter_locator import (
AngleFormatterLocator,
ScalarFormatterLocator,
)
class TestAngleFormatterLocator:
def test_no_options(self):
fl = AngleFormatterLocator()
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
MESSAGE = r"At most one of values/number/spacing can be specified"
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(values=[1.0, 2.0], number=5)
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(values=[1.0, 2.0], spacing=5.0 * u.deg)
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(number=5, spacing=5.0 * u.deg)
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(values=[1.0, 2.0], number=5, spacing=5.0 * u.deg)
def test_values(self):
fl = AngleFormatterLocator(values=[0.1, 1.0, 14.0] * u.degree)
assert fl.values.to_value(u.degree).tolist() == [0.1, 1.0, 14.0]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [0.1, 1.0, 14.0])
def test_number(self):
fl = AngleFormatterLocator(number=7)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [35.0, 40.0, 45.0, 50.0, 55.0])
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(
values.to_value(u.degree), [34.5, 34.75, 35.0, 35.25, 35.5, 35.75, 36.0]
)
fl.format = "dd"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35.0, 36.0])
def test_spacing(self):
with pytest.raises(
TypeError,
match=(
r"spacing should be an astropy.units.Quantity instance with units of"
r" angle"
),
):
AngleFormatterLocator(spacing=3.0)
fl = AngleFormatterLocator(spacing=3.0 * u.degree)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3.0 * u.degree
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(
values.to_value(u.degree), [36.0, 39.0, 42.0, 45.0, 48.0, 51.0, 54.0]
)
fl.spacing = 30.0 * u.arcmin
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 35.0, 35.5, 36.0])
with pytest.warns(UserWarning, match=r"Spacing is too small"):
fl.format = "dd"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35.0, 36.0])
def test_minor_locator(self):
fl = AngleFormatterLocator()
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(
minor_values.to_value(u.degree),
[
36.0,
37.0,
38.0,
39.0,
41.0,
42.0,
43.0,
44.0,
46.0,
47.0,
48.0,
49.0,
51.0,
52.0,
53.0,
54.0,
],
)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1.0, 14.0] * u.degree
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [])
@pytest.mark.parametrize(
("format", "string"),
[
("dd", "15\xb0"),
("dd:mm", "15\xb024'"),
("dd:mm:ss", "15\xb023'32\""),
("dd:mm:ss.s", "15\xb023'32.0\""),
("dd:mm:ss.ssss", "15\xb023'32.0316\""),
("hh", "1h"),
("hh:mm", "1h02m"),
("hh:mm:ss", "1h01m34s"),
("hh:mm:ss.s", "1h01m34.1s"),
("hh:mm:ss.ssss", "1h01m34.1354s"),
("d", "15\xb0"),
("d.d", "15.4\xb0"),
("d.dd", "15.39\xb0"),
("d.ddd", "15.392\xb0"),
("m", "924'"),
("m.m", "923.5'"),
("m.mm", "923.53'"),
("s", '55412"'),
("s.s", '55412.0"'),
("s.ss", '55412.03"'),
],
)
def test_format(self, format, string):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.formatter([15.392231] * u.degree, None, format="ascii")[0] == string
@pytest.mark.parametrize(
("separator", "format", "string"),
[
(("deg", "'", '"'), "dd", "15deg"),
(("deg", "'", '"'), "dd:mm", "15deg24'"),
(("deg", "'", '"'), "dd:mm:ss", "15deg23'32\""),
((":", "-", "s"), "dd:mm:ss.s", "15:23-32.0s"),
(":", "dd:mm:ss.s", "15:23:32.0"),
((":", ":", "s"), "hh", "1:"),
(("-", "-", "s"), "hh:mm:ss.ssss", "1-01-34.1354s"),
(("d", ":", '"'), "d", "15\xb0"),
(("d", ":", '"'), "d.d", "15.4\xb0"),
],
)
def test_separator(self, separator, format, string):
fl = AngleFormatterLocator(number=5, format=format)
fl.sep = separator
assert fl.formatter([15.392231] * u.degree, None)[0] == string
def test_latex_format(self):
fl = AngleFormatterLocator(number=5, format="dd:mm:ss")
assert fl.formatter([15.392231] * u.degree, None)[0] == "15\xb023'32\""
with rc_context(rc={"text.usetex": True}):
assert (
fl.formatter([15.392231] * u.degree, None)[0]
== "$15^\\circ23{}^\\prime32{}^{\\prime\\prime}$"
)
@pytest.mark.parametrize("format", ["x.xxx", "dd.ss", "dd:ss", "mdd:mm:ss"])
def test_invalid_formats(self, format):
fl = AngleFormatterLocator(number=5)
with pytest.raises(ValueError, match=f"Invalid format: {format}"):
fl.format = format
@pytest.mark.parametrize(
("format", "base_spacing"),
[
("dd", 1.0 * u.deg),
("dd:mm", 1.0 * u.arcmin),
("dd:mm:ss", 1.0 * u.arcsec),
("dd:mm:ss.ss", 0.01 * u.arcsec),
("hh", 15.0 * u.deg),
("hh:mm", 15.0 * u.arcmin),
("hh:mm:ss", 15.0 * u.arcsec),
("hh:mm:ss.ss", 0.15 * u.arcsec),
("d", 1.0 * u.deg),
("d.d", 0.1 * u.deg),
("d.dd", 0.01 * u.deg),
("d.ddd", 0.001 * u.deg),
("m", 1.0 * u.arcmin),
("m.m", 0.1 * u.arcmin),
("m.mm", 0.01 * u.arcmin),
("s", 1.0 * u.arcsec),
("s.s", 0.1 * u.arcsec),
("s.ss", 0.01 * u.arcsec),
],
)
def test_base_spacing(self, format, base_spacing):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = AngleFormatterLocator()
fl.spacing = 0.032 * u.deg
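# 0.032 deg is 115.2 arcsec; switching to a sexagesimal format should snap
# the spacing to a whole multiple of the base spacing (1 arcsec), hence the
# warning and the 115 arcsec value checked below.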
with pytest.warns(
UserWarning, match=r"Spacing is not a multiple of base spacing"
):
fl.format = "dd:mm:ss"
assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.0)
def test_decimal_values(self):
# Regression test for a bug that meant that the spacing was not
# determined correctly for decimal coordinates
fl = AngleFormatterLocator()
fl.format = "d.dddd"
assert_quantity_allclose(
fl.locator(266.9730, 266.9750)[0],
[266.9735, 266.9740, 266.9745, 266.9750] * u.deg,
)
fl = AngleFormatterLocator(decimal=True, format_unit=u.hourangle, number=4)
assert_quantity_allclose(
fl.locator(266.9730, 266.9750)[0], [17.79825, 17.79830] * u.hourangle
)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
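# The limits passed to locator() are interpreted in the intrinsic unit
# (arcsec here), while the tick step is chosen as a round value in the
# format unit's system (e.g. whole arcminutes when the format unit is degrees).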
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.arcsec, decimal=True)
assert_quantity_allclose(
fl.locator(850, 2150)[0],
[1000.0, 1200.0, 1400.0, 1600.0, 1800.0, 2000.0] * u.arcsec,
)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.degree, decimal=False)
assert_quantity_allclose(
fl.locator(850, 2150)[0], [15.0, 20.0, 25.0, 30.0, 35.0] * u.arcmin
)
fl = AngleFormatterLocator(
unit=u.arcsec, format_unit=u.hourangle, decimal=False
)
assert_quantity_allclose(
fl.locator(850, 2150)[0],
[60.0, 75.0, 90.0, 105.0, 120.0, 135.0] * (15 * u.arcsec),
)
fl = AngleFormatterLocator(unit=u.arcsec)
fl.format = "dd:mm:ss"
assert_quantity_allclose(fl.locator(0.9, 1.1)[0], [1] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, spacing=0.2 * u.arcsec)
assert_quantity_allclose(fl.locator(0.3, 0.9)[0], [0.4, 0.6, 0.8] * u.arcsec)
@pytest.mark.parametrize(
("spacing", "string"),
[
(2 * u.deg, "15\xb0"),
(2 * u.arcmin, "15\xb024'"),
(2 * u.arcsec, "15\xb023'32\""),
(0.1 * u.arcsec, "15\xb023'32.0\""),
],
)
def test_formatter_no_format(self, spacing, string):
fl = AngleFormatterLocator()
assert fl.formatter([15.392231] * u.degree, spacing)[0] == string
@pytest.mark.parametrize(
("format_unit", "decimal", "show_decimal_unit", "spacing", "ascii", "latex"),
[
(u.degree, False, True, 2 * u.degree, "15\xb0", r"$15^\circ$"),
(
u.degree,
False,
True,
2 * u.arcmin,
"15\xb024'",
r"$15^\circ24{}^\prime$",
),
(
u.degree,
False,
True,
2 * u.arcsec,
"15\xb023'32\"",
r"$15^\circ23{}^\prime32{}^{\prime\prime}$",
),
(
u.degree,
False,
True,
0.1 * u.arcsec,
"15\xb023'32.0\"",
r"$15^\circ23{}^\prime32.0{}^{\prime\prime}$",
),
(u.hourangle, False, True, 15 * u.degree, "1h", r"$1^{\mathrm{h}}$"),
(
u.hourangle,
False,
True,
15 * u.arcmin,
"1h02m",
r"$1^{\mathrm{h}}02^{\mathrm{m}}$",
),
(
u.hourangle,
False,
True,
15 * u.arcsec,
"1h01m34s",
r"$1^{\mathrm{h}}01^{\mathrm{m}}34^{\mathrm{s}}$",
),
(
u.hourangle,
False,
True,
1.5 * u.arcsec,
"1h01m34.1s",
r"$1^{\mathrm{h}}01^{\mathrm{m}}34.1^{\mathrm{s}}$",
),
(u.degree, True, True, 15 * u.degree, "15\xb0", r"$15\mathrm{^\circ}$"),
(
u.degree,
True,
True,
0.12 * u.degree,
"15.39\xb0",
r"$15.39\mathrm{^\circ}$",
),
(
u.degree,
True,
True,
0.0036 * u.arcsec,
"15.392231\xb0",
r"$15.392231\mathrm{^\circ}$",
),
(u.arcmin, True, True, 15 * u.degree, "924'", r"$924\mathrm{^\prime}$"),
(
u.arcmin,
True,
True,
0.12 * u.degree,
"923.5'",
r"$923.5\mathrm{^\prime}$",
),
(
u.arcmin,
True,
True,
0.1 * u.arcmin,
"923.5'",
r"$923.5\mathrm{^\prime}$",
),
(
u.arcmin,
True,
True,
0.0002 * u.arcmin,
"923.5339'",
r"$923.5339\mathrm{^\prime}$",
),
(
u.arcsec,
True,
True,
0.01 * u.arcsec,
'55412.03"',
r"$55412.03\mathrm{^{\prime\prime}}$",
),
(
u.arcsec,
True,
True,
0.001 * u.arcsec,
'55412.032"',
r"$55412.032\mathrm{^{\prime\prime}}$",
),
(
u.mas,
True,
True,
0.001 * u.arcsec,
"55412032 mas",
r"$55412032\;\mathrm{mas}$",
),
(u.degree, True, False, 15 * u.degree, "15", "15"),
(u.degree, True, False, 0.12 * u.degree, "15.39", "15.39"),
(u.degree, True, False, 0.0036 * u.arcsec, "15.392231", "15.392231"),
(u.arcmin, True, False, 15 * u.degree, "924", "924"),
(u.arcmin, True, False, 0.12 * u.degree, "923.5", "923.5"),
(u.arcmin, True, False, 0.1 * u.arcmin, "923.5", "923.5"),
(u.arcmin, True, False, 0.0002 * u.arcmin, "923.5339", "923.5339"),
(u.arcsec, True, False, 0.01 * u.arcsec, "55412.03", "55412.03"),
(u.arcsec, True, False, 0.001 * u.arcsec, "55412.032", "55412.032"),
(u.mas, True, False, 0.001 * u.arcsec, "55412032", "55412032"),
# Make sure that specifying None defaults to
# decimal for non-degree or non-hour angles
(
u.arcsec,
None,
True,
0.01 * u.arcsec,
'55412.03"',
r"$55412.03\mathrm{^{\prime\prime}}$",
),
],
)
def test_formatter_no_format_with_units(
self, format_unit, decimal, show_decimal_unit, spacing, ascii, latex
):
# Check the formatter works when specifying the default units and
# decimal behavior to use.
fl = AngleFormatterLocator(
unit=u.degree,
format_unit=format_unit,
decimal=decimal,
show_decimal_unit=show_decimal_unit,
)
assert fl.formatter([15.392231] * u.degree, spacing, format="ascii")[0] == ascii
assert fl.formatter([15.392231] * u.degree, spacing, format="latex")[0] == latex
def test_incompatible_unit_decimal(self):
with pytest.raises(
UnitsError,
match=(
r"Units should be degrees or hours when using non-decimal .sexagesimal."
r" mode"
),
):
AngleFormatterLocator(unit=u.arcmin, decimal=False)
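# Illustrative sketch (not part of the original test suite): how the locator and
# formatter exercised above are typically driven together.  The helper name below
# is hypothetical; it assumes the same AngleFormatterLocator import used by these
# tests.
from astropy.visualization.wcsaxes.formatter_locator import AngleFormatterLocator

def _example_angle_formatter_locator():
    fl = AngleFormatterLocator(number=4, format="dd:mm")
    values, spacing = fl.locator(10.5, 12.5)  # tick positions between 10.5 and 12.5 deg
    labels = fl.formatter(values, spacing)    # matching tick label strings
    return values, spacing, labels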
class TestScalarFormatterLocator:
def test_no_options(self):
fl = ScalarFormatterLocator(unit=u.m)
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
MESSAGE = r"At most one of values/number/spacing can be specified"
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(values=[1.0, 2.0] * u.m, number=5)
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(values=[1.0, 2.0] * u.m, spacing=5.0 * u.m)
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(number=5, spacing=5.0 * u.m)
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(values=[1.0, 2.0] * u.m, number=5, spacing=5.0 * u.m)
def test_values(self):
fl = ScalarFormatterLocator(values=[0.1, 1.0, 14.0] * u.m, unit=u.m)
assert fl.values.value.tolist() == [0.1, 1.0, 14.0]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [0.1, 1.0, 14.0])
def test_number(self):
fl = ScalarFormatterLocator(number=7, unit=u.m)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, np.linspace(36.0, 54.0, 10))
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, np.linspace(34.4, 36, 9))
fl.format = "x"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35.0, 36.0])
def test_spacing(self):
fl = ScalarFormatterLocator(spacing=3.0 * u.m)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3.0 * u.m
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [36.0, 39.0, 42.0, 45.0, 48.0, 51.0, 54.0])
fl.spacing = 0.5 * u.m
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [34.5, 35.0, 35.5, 36.0])
with pytest.warns(UserWarning, match=r"Spacing is too small"):
fl.format = "x"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35.0, 36.0])
def test_minor_locator(self):
fl = ScalarFormatterLocator(unit=u.m)
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(
minor_values.value,
[
36.0,
37.0,
38.0,
39.0,
41.0,
42.0,
43.0,
44.0,
46.0,
47.0,
48.0,
49.0,
51.0,
52.0,
53.0,
54.0,
],
)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1.0, 14.0] * u.m
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [])
@pytest.mark.parametrize(
("format", "string"),
[
("x", "15"),
("x.x", "15.4"),
("x.xx", "15.39"),
("x.xxx", "15.392"),
("%g", "15.3922"),
("%f", "15.392231"),
("%.2f", "15.39"),
("%.3f", "15.392"),
],
)
def test_format(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(
("format", "string"),
[("x", "1539"), ("x.x", "1539.2"), ("x.xx", "1539.22"), ("x.xxx", "1539.223")],
)
def test_format_unit(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
fl.format_unit = u.cm
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize("format", ["dd", "dd:mm", "xx:mm", "mx.xxx"])
def test_invalid_formats(self, format):
fl = ScalarFormatterLocator(number=5, unit=u.m)
with pytest.raises(ValueError, match=f"Invalid format: {format}"):
fl.format = format
@pytest.mark.parametrize(
("format", "base_spacing"),
[("x", 1.0 * u.m), ("x.x", 0.1 * u.m), ("x.xxx", 0.001 * u.m)],
)
def test_base_spacing(self, format, base_spacing):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = ScalarFormatterLocator(unit=u.m)
fl.spacing = 0.032 * u.m
with pytest.warns(
UserWarning, match=r"Spacing is not a multiple of base spacing"
):
fl.format = "x.xx"
assert_almost_equal(fl.spacing.to_value(u.m), 0.03)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
assert_quantity_allclose(
fl.locator(850, 2150)[0],
[1000.0, 1200.0, 1400.0, 1600.0, 1800.0, 2000.0] * u.cm,
)
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
fl.format = "x.x"
assert_quantity_allclose(fl.locator(1, 19)[0], [10] * u.cm)
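# Illustrative sketch (not part of the original test suite): formatting in a unit
# other than the intrinsic one, mirroring test_format_unit above.  The helper name
# is hypothetical.
from astropy import units as u
from astropy.visualization.wcsaxes.formatter_locator import ScalarFormatterLocator

def _example_scalar_format_unit():
    fl = ScalarFormatterLocator(number=5, format="x.x", unit=u.m)
    fl.format_unit = u.cm
    return fl.formatter([15.0] * u.m, None)  # ["1500.0"], i.e. labelled in cm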
|
4fea0133504229082f8734feac982e2fc60d28c42603f63f74f254a495b5c249 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import typing as T
# THIRD PARTY
import pytest
# LOCAL
from astropy import units as u
from astropy.units import Quantity
def test_ignore_generic_type_annotations():
"""Test annotations that are not unit related are ignored.
This test passes if the function works.
"""
# one unit, one not (should be ignored)
@u.quantity_input
def func(x: u.m, y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str) # if this doesn't fail, it worked.
assert i_q == o_q
assert i_str == o_str
class TestQuantityUnitAnnotations:
"""Test Quantity[Unit] type annotation."""
def test_simple_annotation(self):
@u.quantity_input
def func(x: Quantity[u.m], y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = func(i_q, i_str)
assert i_q == o_q
assert i_str == o_str
# checks the input on the 1st arg
with pytest.raises(u.UnitsError):
func(1 * u.s, i_str)
# but not the second
o_q, o_str = func(i_q, {"not": "a string"})
assert i_q == o_q
assert i_str != o_str
def test_multiple_annotation(self):
@u.quantity_input
def multi_func(a: Quantity[u.km]) -> Quantity[u.m]:
return a
i_q = 2 * u.km
o_q = multi_func(i_q)
assert o_q == i_q
assert o_q.unit == u.m
def test_optional_and_annotated(self):
@u.quantity_input
def opt_func(x: T.Optional[Quantity[u.m]] = None) -> Quantity[u.km]:
if x is None:
return 1 * u.km
return x
i_q = 250 * u.m
o_q = opt_func(i_q)
assert o_q.unit == u.km
assert o_q == i_q
i_q = None
o_q = opt_func(i_q)
assert o_q == 1 * u.km
def test_union_and_annotated(self):
# Union and Annotated
@u.quantity_input
def union_func(x: T.Union[Quantity[u.m], Quantity[u.s], None]):
if x is None:
return None
else:
return 2 * x
i_q = 1 * u.m
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = 1 * u.s
o_q = union_func(i_q)
assert o_q == 2 * i_q
i_q = None
o_q = union_func(i_q)
assert o_q is None
def test_not_unit_or_ptype(self):
with pytest.raises(TypeError, match="unit annotation is not"):
Quantity["definitely not a unit"]
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args_noconvert3(solarx_unit, solary_unit):
@u.quantity_input()
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
solarx, solary = myfunc_args(1 * u.deg, 1 * u.arcmin)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.deg
assert solary.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [u.arcsec, "angle"])
def test_args_nonquantity3(solarx_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 100)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert solarx.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.eV), ("angle", "energy")]
)
def test_arg_equivalencies3(solarx_unit, solary_unit):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary + (10 * u.J) # Add an energy to check equiv is working
solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.gram
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(
u.UnitsError,
match=(
"Argument 'solary' to function 'myfunc_args' must be in units "
f"convertible to '{str(solary_unit)}'."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.km)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit):
return solarx, solary
with pytest.raises(
TypeError,
match=(
"Argument 'solary' to function 'myfunc_args' has no 'unit' "
"attribute. You should pass in an astropy Quantity instead."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, 100)
def test_decorator_override():
@u.quantity_input(solarx=u.arcsec)
def myfunc_args(solarx: u.km, solary: u.arcsec):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
assert isinstance(solarx, Quantity)
assert isinstance(solary, Quantity)
assert solarx.unit == u.arcsec
assert solary.unit == u.arcsec
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec):
return solarx, solary, myk
solarx, solary, myk = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert myk.unit == u.deg
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_unused_kwargs3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(
solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec, myk2=1000
):
return solarx, solary, myk, myk2
solarx, solary, myk, myk2 = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg, myk2=10)
assert isinstance(solarx, Quantity)
assert isinstance(solary, int)
assert isinstance(myk, Quantity)
assert isinstance(myk2, int)
assert myk.unit == u.deg
assert myk2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies3(solarx_unit, energy):
@u.quantity_input(equivalencies=u.mass_energy())
def myfunc_args(solarx: solarx_unit, energy: energy = 10 * u.eV):
return solarx, energy + (10 * u.J) # Add an energy to check equiv is working
solarx, energy = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(solarx, Quantity)
assert isinstance(energy, Quantity)
assert solarx.unit == u.arcsec
assert energy.unit == u.gram
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
with pytest.raises(
u.UnitsError,
match=(
"Argument 'solary' to function 'myfunc_args' must be in "
f"units convertible to '{str(solary_unit)}'."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, solary=100 * u.km)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
with pytest.raises(
TypeError,
match=(
"Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. "
"You should pass in an astropy Quantity instead."
),
):
solarx, solary = myfunc_args(1 * u.arcsec, solary=100)
@pytest.mark.parametrize(
"solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_default3(solarx_unit, solary_unit):
@u.quantity_input
def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
return solarx, solary
solarx, solary = myfunc_args(1 * u.arcsec)
def test_return_annotation():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> u.deg:
return solarx
solarx = myfunc_args(1 * u.arcsec)
assert solarx.unit is u.deg
def test_return_annotation_none():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> None:
pass
solarx = myfunc_args(1 * u.arcsec)
assert solarx is None
def test_return_annotation_notUnit():
@u.quantity_input
def myfunc_args(solarx: u.arcsec) -> int:
return 0
solarx = myfunc_args(1 * u.arcsec)
assert solarx == 0
def test_enum_annotation():
# Regression test for gh-9932
from enum import Enum, auto
class BasicEnum(Enum):
AnOption = auto()
@u.quantity_input
def myfunc_args(a: BasicEnum, b: u.arcsec) -> None:
pass
myfunc_args(BasicEnum.AnOption, 1 * u.arcsec)
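# Illustrative sketch (not part of the original test suite): physical-type strings
# such as "angle" are accepted in place of explicit units, as in the parametrized
# tests above.  The function name is hypothetical.
from astropy import units as u

@u.quantity_input
def _example_physical_type(angle: "angle"):
    return angle.to(u.deg)

# _example_physical_type(0.5 * u.rad) returns about 28.648 deg.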
|
fb3e774424427a32c68709a026e3a3fd8cac1f634c7462d74c1a5649fec95062 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
from typing import Annotated, Any, Optional, Union, get_args, get_origin, get_type_hints
import numpy as np
from astropy import units as u
class TestQuantityTyping:
"""Test Quantity Typing Annotations."""
def test_quantity_typing(self):
"""Test type hint creation from Quantity."""
annot = u.Quantity[u.m]
assert get_origin(annot) is Annotated
assert get_args(annot) == (u.Quantity, u.m)
# test usage
def func(x: annot, y: str) -> u.Quantity[u.s]:
return x, y
annots = get_type_hints(func, include_extras=True)
assert annots["x"] is annot
assert annots["return"].__metadata__[0] == u.s
def test_metadata_in_annotation(self):
"""Test Quantity annotation with added metadata."""
multi_annot = u.Quantity[u.m, Any, np.dtype]
def multi_func(x: multi_annot, y: str):
return x, y
annots = get_type_hints(multi_func, include_extras=True)
assert annots["x"] == multi_annot
def test_optional_and_annotated(self):
"""Test Quantity annotation in an Optional."""
opt_annot = Optional[u.Quantity[u.m]]
def opt_func(x: opt_annot, y: str):
return x, y
annots = get_type_hints(opt_func, include_extras=True)
assert annots["x"] == opt_annot
def test_union_and_annotated(self):
"""Test Quantity annotation in a Union."""
# double Quantity[]
union_annot1 = Union[u.Quantity[u.m], u.Quantity[u.s]]
# one Quantity, one physical-type
union_annot2 = Union[u.Quantity[u.m], u.Quantity["time"]]
# one Quantity, one general type
union_annot3 = Union[u.Quantity[u.m / u.s], float]
def union_func(x: union_annot1, y: union_annot2) -> union_annot3:
if isinstance(y, str): # value = time
return x.value # returns <float>
else:
return x / y # returns Quantity[m / s]
annots = get_type_hints(union_func, include_extras=True)
assert annots["x"] == union_annot1
assert annots["y"] == union_annot2
assert annots["return"] == union_annot3
def test_quantity_subclass_typing(self):
"""Test type hint creation from a Quantity subclasses."""
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
annot = Length[u.km]
assert get_origin(annot) is Annotated
assert get_args(annot) == (Length, u.km)
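# Illustrative sketch (not part of the original test suite): Quantity[...] hints
# are plain typing.Annotated aliases, so they can be introspected like any other
# annotation.  The helper name is hypothetical.
from typing import Annotated, get_args, get_origin

from astropy import units as u

def _example_annotation_introspection():
    hint = u.Quantity[u.m / u.s]
    assert get_origin(hint) is Annotated
    assert get_args(hint) == (u.Quantity, u.m / u.s)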
|
2d7b66b3250c90e969313e2d7f9265e38d7ce95ae3a919341693ce79beae2b23 | # The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import dataclasses
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.numpycompat import NUMPY_LT_1_25
from astropy.utils.compat.optional_deps import HAS_SCIPY
testcase = namedtuple("testcase", ["f", "q_in", "q_out"])
testexc = namedtuple("testexc", ["f", "q_in", "exc", "msg"])
testwarn = namedtuple("testwarn", ["f", "q_in", "wfilter"])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful of the following line, would break on a function returning
# a single tuple (as opposed to tuple of return values)
results = (results,) if not isinstance(results, tuple) else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
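# Illustrative sketch (not part of the original test suite): the (results,)
# wrapping in test_testcase above assumes multi-output ufuncs return a tuple of
# Quantities, as np.modf does.  The helper name is hypothetical.
import numpy as np
from astropy import units as u

def _example_multi_output_return():
    frac, whole = np.modf(1.5 * u.dimensionless_unscaled)
    assert frac == 0.5 * u.dimensionless_unscaled
    assert whole == 1.0 * u.dimensionless_unscaled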
class TestUfuncHelpers:
# Note that this test may fail if scipy is present: although scipy.special
# ufuncs are only loaded on demand, a prior test may already have imported
# scipy.special, which would disrupt the coverage check below.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
@pytest.mark.skipif(HAS_SCIPY, reason="scipy coverage is known to be incomplete")
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = {
ufunc
for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = {
ufunc
for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)
}
assert all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert "scipy.special" in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
@pytest.mark.slow
def test_thread_safety(self, fast_thread_switching):
def dummy_ufunc(*args, **kwargs):
return np.sqrt(*args, **kwargs)
def register():
return {dummy_ufunc: helper_sqrt}
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
helpers = UfuncHelpers()
helpers.register_module(
"astropy.units.tests.test_quantity_ufuncs",
["dummy_ufunc"],
register,
)
futures = [
executor.submit(lambda: helpers[dummy_ufunc])
for i in range(workers)
]
values = [future.result() for future in futures]
assert values == [helper_sqrt] * workers
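# Illustrative sketch (not part of the original test suite): how the helper
# registry is manipulated, mirroring test_removal_addition above.  Assigning None
# marks a ufunc as unsupported; assigning a helper function supports it again.
# The helper name is hypothetical.
import numpy as np
from astropy.units import quantity_helper as qh

def _example_toggle_helper():
    saved = qh.UFUNC_HELPERS[np.add]
    qh.UFUNC_HELPERS[np.add] = None    # np.add now lands in UNSUPPORTED_UFUNCS
    qh.UFUNC_HELPERS[np.add] = saved   # restore the original helper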
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize(
"tc",
(
testcase(
f=np.sin,
q_in=(30.0 * u.degree,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.sin,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([0.0, 1.0 / np.sqrt(2.0), 1.0]) * u.one,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(30.0 * u.degree),),
q_out=(np.radians(30.0) * u.radian,),
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.cos,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(0.5 * u.dimensionless_unscaled,),
),
testcase(
f=np.cos,
q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
q_out=(np.array([1.0, 1.0 / np.sqrt(2.0), 0.0]) * u.one,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
),
testcase(
f=np.tan,
q_in=(np.pi / 3.0 * u.radian,),
q_out=(np.sqrt(3.0) * u.dimensionless_unscaled,),
),
testcase(
f=np.tan,
q_in=(np.array([0.0, 45.0, 135.0, 180.0]) * u.degree,),
q_out=(np.array([0.0, 1.0, -1.0, 0.0]) * u.dimensionless_unscaled,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3.0 * u.radian),),
q_out=(np.pi / 3.0 * u.radian,),
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
q_out=(np.radians(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
),
testcase(
f=np.arctan2,
q_in=(np.array([10.0, 30.0, 70.0, 80.0]) * u.m, 2.0 * u.km),
q_out=(
np.arctan2(np.array([10.0, 30.0, 70.0, 80.0]), 2000.0) * u.radian,
),
),
testcase(
f=np.arctan2,
q_in=((np.array([10.0, 80.0]) * u.m / (2.0 * u.km)).to(u.one), 1.0),
q_out=(np.arctan2(np.array([10.0, 80.0]) / 2000.0, 1.0) * u.radian,),
),
testcase(f=np.deg2rad, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.radians, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
testcase(f=np.deg2rad, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.radians, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
testcase(f=np.rad2deg, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.degrees, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
testcase(f=np.rad2deg, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
testcase(f=np.degrees, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
),
)
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize(
"te",
(
testexc(f=np.deg2rad, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.radians, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.rad2deg, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(f=np.degrees, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
testexc(
f=np.sin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units",
),
testexc(
f=np.arcsin,
q_in=(3.0 * u.m,),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities",
),
testexc(
f=np.cos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units",
),
testexc(
f=np.arccos,
q_in=(3.0 * u.s,),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities",
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units",
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N,),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0 * u.s),
exc=u.UnitsError,
msg="compatible dimensions",
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.0),
exc=u.UnitsError,
msg="dimensionless quantities when other arg",
),
),
)
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize(
"tw",
(testwarn(f=np.arcsin, q_in=(27.0 * u.pc / (15 * u.kpc),), wfilter="error"),),
)
def test_testwarns(self, tw):
return test_testwarn(tw)
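# Illustrative sketch (not part of the original test suite): trigonometric ufuncs
# require angular (or dimensionless) input and return dimensionless Quantities,
# while the inverse functions return radians.  The helper name is hypothetical.
import numpy as np
from astropy import units as u

def _example_trig_units():
    s = np.sin(30 * u.deg)
    assert s.unit == u.dimensionless_unscaled and np.isclose(s.value, 0.5)
    assert np.arctan2(1 * u.m, 2 * u.km).unit == u.radian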
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4.0 * u.m, 2.0 / u.s) == 8.0 * u.m / u.s
assert np.multiply(4.0 * u.m, 2.0) == 8.0 * u.m
assert np.multiply(4.0, 2.0 / u.s) == 8.0 / u.s
def test_multiply_array(self):
assert np.all(
np.multiply(np.arange(3.0) * u.m, 2.0 / u.s)
== np.arange(0, 6.0, 2.0) * u.m / u.s
)
@pytest.mark.skipif(
not isinstance(getattr(np, "matmul", None), np.ufunc),
reason="np.matmul is not yet a gufunc",
)
def test_matmul(self):
q = np.arange(3.0) * u.m
r = np.matmul(q, q)
assert r == 5.0 * u.m**2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array(
[[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]
) / u.s # fmt: skip
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4.0 * u.m, 2.0 * u.s) == function(4.0, 2.0) * u.m / u.s
assert function(4.0 * u.m, 2.0) == function(4.0, 2.0) * u.m
assert function(4.0, 2.0 * u.s) == function(4.0, 2.0) / u.s
@pytest.mark.parametrize("function", (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(
function(np.arange(3.0) * u.m, 2.0 * u.s)
== function(np.arange(3.0), 2.0) * u.m / u.s
)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1.0, 2.0, 3.0]) * u.m
divisor = np.array([3.0, 4.0, 5.0]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13.0, 19.0, 23.0])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4.0 * u.m) == 2.0 * u.m**0.5
def test_sqrt_array(self):
assert np.all(
np.sqrt(np.array([1.0, 4.0, 9.0]) * u.m)
== np.array([1.0, 2.0, 3.0]) * u.m**0.5
)
def test_square_scalar(self):
assert np.square(4.0 * u.m) == 16.0 * u.m**2
def test_square_array(self):
assert np.all(
np.square(np.array([1.0, 2.0, 3.0]) * u.m)
== np.array([1.0, 4.0, 9.0]) * u.m**2
)
def test_reciprocal_scalar(self):
assert np.reciprocal(4.0 * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(
np.reciprocal(np.array([1.0, 2.0, 4.0]) * u.m)
== np.array([1.0, 0.5, 0.25]) / u.m
)
def test_heaviside_scalar(self):
assert np.heaviside(0.0 * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert (
np.heaviside(0.0 * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled
)
assert np.heaviside(2.0 * u.J, 0.25) == 1.0 * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1.0, 0.0, 0.0, +1.0])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(
np.heaviside(values * u.m, halfway * u.dimensionless_unscaled)
== [0, 0.25, 0.75, +1.0] * u.dimensionless_unscaled
)
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_scalar(self, function):
assert function(8.0 * u.m**3) == 2.0 * u.m
@pytest.mark.parametrize("function", (np.cbrt,))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
# does not exactly equal 4. See gh-4388.
values = np.array([1.0, 8.0, 64.0])
assert np.all(function(values * u.m**3) == function(values) * u.m)
def test_power_scalar(self):
assert np.power(4.0 * u.m, 2.0) == 16.0 * u.m**2
assert np.power(4.0, 200.0 * u.cm / u.m) == u.Quantity(
16.0, u.dimensionless_unscaled
)
# regression check on #1696
assert np.power(4.0 * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(
np.power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_float_power_array(self):
assert np.all(
np.float_power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
== np.array([1.0, 8.0, 27.0]) * u.m**3
)
# regression check on #1696
assert np.all(
np.float_power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
)
def test_power_array_array(self):
with pytest.raises(ValueError):
np.power(4.0 * u.m, [2.0, 4.0])
def test_power_array_array2(self):
with pytest.raises(ValueError):
np.power([2.0, 4.0] * u.m, [2.0, 4.0])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2.0, 4.0] * u.m / u.m
powers = [2.0, 4.0]
res = np.power(q, powers)
assert np.all(res.value == q.value**powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2.0, 4.0] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2**2
assert np.all(res3.value == q2.value**2)
assert res3.unit == q2.unit**2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError, match="raise something to a dimensionless"):
np.power(3.0, 4.0 * u.m)
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.0) == 3.0 * u.m
assert np.copysign(3 * u.m, 1.0 * u.s) == 3.0 * u.m
assert np.copysign(3 * u.m, -1.0) == -3.0 * u.m
assert np.copysign(3 * u.m, -1.0 * u.s) == -3.0 * u.m
def test_copysign_array(self):
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0 * u.m)
== -np.array([1.0, 2.0, 3.0]) * u.s
)
assert np.all(
np.copysign(
np.array([1.0, 2.0, 3.0]) * u.s, np.array([-2.0, 2.0, -4.0]) * u.m
)
== np.array([-1.0, 2.0, -3.0]) * u.s
)
q = np.copysign(np.array([1.0, 2.0, 3.0]), -3 * u.m)
assert np.all(q == np.array([-1.0, -2.0, -3.0]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4.0 * u.m, 2) == 16.0 * u.m
def test_ldexp_array(self):
assert np.all(
np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1])
== np.array([8.0, 8.0, 6.0]) * u.m
)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3.0 * u.m, 4.0)
with pytest.raises(TypeError):
np.ldexp(3.0, u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_scalar(self, function):
q = function(3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(np.array([1.0 / 3.0, 1.0 / 2.0, 1.0])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])))
@pytest.mark.parametrize(
"function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
)
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function "
"to dimensionless quantities"
),
):
function(3.0 * u.m / u.s)
def test_modf_scalar(self):
q = np.modf(9.0 * u.m / (600.0 * u.cm))
assert q == (0.5 * u.dimensionless_unscaled, 1.0 * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.0) * u.m / (500.0 * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3.0 * u.m / (6.0 * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
assert all(
(_q0, _q1) == np.frexp(_d)
for _q0, _q1, _d in zip(q[0], q[1], [1.0 / 3.0, 1.0 / 2.0, 1.0])
)
def test_frexp_invalid_units(self):
# Can't use frexp() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(3.0 * u.m / u.s)
# also does not work on quantities that can be made dimensionless
with pytest.raises(
TypeError,
match=(
"Can only apply 'frexp' function to unscaled dimensionless quantities"
),
):
np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm), 1.0)
assert q.unit == u.dimensionless_unscaled
assert_allclose(
q.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0]), 1.0)
)
@pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.km / u.s, 3.0 * u.m / u.s)
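# Illustrative sketch (not part of the original test suite): exponential and
# logarithmic ufuncs accept only quantities that can be made dimensionless, and
# scale them first.  The helper name is hypothetical.
import numpy as np
from astropy import units as u

def _example_log_dimensionless():
    return np.log((200 * u.cm) / (1 * u.m))  # log(2.0) as a dimensionless Quantity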
class TestInvariantUfuncs:
@pytest.mark.parametrize(
"ufunc",
[
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.positive,
],
)
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(
"ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]
)
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(
("ufunc", "arbitrary"),
[
(np.add, 0.0),
(np.subtract, 0.0),
(np.hypot, 0.0),
(np.maximum, 0.0),
(np.minimum, 0.0),
(np.nextafter, 0.0),
(np.remainder, np.inf),
(np.mod, np.inf),
(np.fmod, np.inf),
],
)
def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i1, arbitrary)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))
@pytest.mark.parametrize(
"ufunc",
[
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
],
)
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
class TestComparisonUfuncs:
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.0)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(
q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
)
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
for arbitrary_unit_value in (0.0, np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0.0, np.inf, np.nan]))
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
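# Illustrative sketch (not part of the original test suite): comparison ufuncs
# convert to a common unit and return plain boolean arrays, not Quantities.  The
# helper name is hypothetical.
import numpy as np
from astropy import units as u

def _example_comparison():
    result = np.less([1.0, 2.0] * u.m, 150 * u.cm)
    assert not isinstance(result, u.Quantity)
    return result  # array([ True, False])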
class TestInplaceUfuncs:
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value / 10.0, out=s)
assert check is s
assert np.all(check.value == np.arcsin(value / 10.0))
assert check.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100.0 * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.0
assert check is s
assert np.all(check.value == value / 2.0)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2.0 * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1.0 * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2.0 / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert_allclose(s.value, np.arctan2(1.0, 2.0))
assert s.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.0 * u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.0) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1.0 * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
The first two tests check that float32 is preserved (closes gh-3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += 20.0 * u.km
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize("ufunc", (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign_inplace(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
def test_ndarray_inplace_op_with_quantity(self):
"""Regression test for gh-13911."""
a = np.arange(3.0)
q = u.Quantity([12.5, 25.0], u.percent)
a[:2] += q # This used to fail
assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
class TestWhere:
"""Test the where argument in ufuncs."""
def test_where(self):
q = np.arange(4.0) << u.m
out = np.zeros(4) << u.m
result = np.add(q, 1 * u.km, out=out, where=[True, True, True, False])
assert result is out
assert_array_equal(result, [1000.0, 1001.0, 1002.0, 0.0] << u.m)
@pytest.mark.xfail(
NUMPY_LT_1_25, reason="where array_ufunc support introduced in numpy 1.25"
)
def test_exception_with_where_quantity(self):
a = np.ones(2)
where = np.ones(2, bool) << u.m
with pytest.raises(TypeError, match="all returned NotImplemented"):
np.add(a, a, out=a, where=where)
@pytest.mark.skipif(
not hasattr(np.core.umath, "clip"), reason="no clip ufunc available"
)
class TestClip:
"""Test the clip ufunc.
In numpy, this is hidden behind a function that does not do
backwards-compatibility checks. We explicitly test the ufunc here.
"""
def setup_method(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1.0, 10.0) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1.0, 10.0) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.0)
expected = self.clip(q, 2.0, 5.0)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1.0, 10.0)
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1.0, 10.0) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = (
self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
)
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1.0, 10.0)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled, out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1.0, 10.0) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1 * u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.0)
with pytest.raises(u.UnitsError):
self.clip(q, 0.0, 1.0)
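# Illustrative sketch (not part of the original test suite): the public np.clip
# function also handles Quantities (it goes through Quantity's own clip handling,
# which converts the bounds), whereas the tests above exercise the raw
# np.core.umath.clip ufunc.  The helper name is hypothetical.
import numpy as np
from astropy import units as u

def _example_public_clip():
    q = np.arange(-1.0, 10.0) * u.m
    return np.clip(q, 125 * u.cm, 0.0055 * u.km)  # Quantity clipped to [1.25, 5.5] m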
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.0) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.0) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.0) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.0) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.0) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.0 * u.km)
np.add.at(check, i, 1000.0)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.0 * u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1 * u.s)
# but be fine if it does not
s = np.arange(10.0) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.0 * u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.0) * u.m
np.multiply.at(s, i, 2.0)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.0 * u.km)
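# Illustrative sketch (not part of the original test suite): selective in-place
# addition with compatible units, as exercised in the class above.  The helper
# name is hypothetical.
import numpy as np
from astropy import units as u

def _example_add_at():
    s = np.arange(5.0) * u.m
    np.add.at(s, [0, 2], 1 * u.km)  # adds 1000 m at indices 0 and 2
    return s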
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.0) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.0) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
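# Illustrative sketch (not part of the original test suite): unit-preserving
# reductions, as exercised above.  np.multiply.reduce on a quantity with a unit
# would instead raise UnitsError.  The helper name is hypothetical.
import numpy as np
from astropy import units as u

def _example_add_reduce():
    s = np.arange(10.0) * u.m
    return np.add.reduce(s)  # 45.0 m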
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.0) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.0) * u.m
s2 = np.arange(2.0) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.0) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
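# Minimal duck types wrapping a Quantity, used below to check how Quantity
# interacts with classes it does not recognize in ufunc calls: DuckQuantity1/2
# define no __array_ufunc__ (so ufuncs should fail with TypeError), while
# DuckQuantity3/4 unwrap to the underlying Quantity, so Quantity must return
# NotImplemented and let the duck type handle the operation.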
@dataclasses.dataclass
class DuckQuantity1:
data: u.Quantity
@dataclasses.dataclass
class DuckQuantity2(DuckQuantity1):
@property
def unit(self) -> u.UnitBase:
return self.data.unit
@dataclasses.dataclass(eq=False)
class DuckQuantity3(DuckQuantity2):
def __array_ufunc__(self, function, method, *inputs, **kwargs):
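        # Unwrap any DuckQuantity inputs (and kwargs such as 'out') to their
        # underlying data, delegate to the ndarray ufunc machinery, and
        # re-wrap the result unless an explicit 'out' was provided.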
inputs = [inp.data if isinstance(inp, type(self)) else inp for inp in inputs]
out = kwargs.get("out", None)
kwargs_copy = {}
for k in kwargs:
kwarg = kwargs[k]
if isinstance(kwarg, type(self)):
kwargs_copy[k] = kwarg.data
elif isinstance(kwarg, (list, tuple)):
kwargs_copy[k] = type(kwarg)(
item.data if isinstance(item, type(self)) else item
for item in kwarg
)
else:
kwargs_copy[k] = kwarg
kwargs = kwargs_copy
for inp in inputs:
if isinstance(inp, np.ndarray):
result = inp.__array_ufunc__(function, method, *inputs, **kwargs)
if result is not NotImplemented:
if out is None:
return type(self)(result)
else:
if function.nout == 1:
return out[0]
else:
return out
return NotImplemented
class DuckQuantity4(DuckQuantity3):
@property
def unit(self):
return DuckQuantity1(1 * self.data.unit)
class TestUfuncReturnsNotImplemented:
@pytest.mark.parametrize("ufunc", (np.negative, np.abs))
class TestUnaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, duck_quantity):
with pytest.raises(TypeError, match="bad operand type for .*"):
ufunc(duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[
DuckQuantity3(1 * u.mm),
DuckQuantity3([1, 2] * u.mm),
DuckQuantity4(1 * u.mm),
],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(np.empty_like(ufunc(duck_quantity.data)))
out_expected = np.empty_like(ufunc(duck_quantity.data))
result = ufunc(duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
@pytest.mark.parametrize("ufunc", (np.add, np.multiply, np.less))
@pytest.mark.parametrize("quantity", (1 * u.m, [1, 2] * u.m))
class TestBinaryUfuncs:
@pytest.mark.parametrize(
"duck_quantity",
[DuckQuantity1(1 * u.mm), DuckQuantity2(1 * u.mm)],
)
def test_basic(self, ufunc, quantity, duck_quantity):
with pytest.raises(
(TypeError, ValueError),
match=(
r"(Unsupported operand type\(s\) for ufunc .*)|"
r"(unsupported operand type\(s\) for .*)|"
r"(Value not scalar compatible or convertible to an int, float, or complex array)"
),
):
ufunc(quantity, duck_quantity)
@pytest.mark.parametrize(
"duck_quantity",
[
DuckQuantity3(1 * u.mm),
DuckQuantity3([1, 2] * u.mm),
DuckQuantity4(1 * u.mm),
],
)
@pytest.mark.parametrize("out", [None, "empty"])
def test_full(self, ufunc, quantity, duck_quantity, out):
out_expected = out
if out == "empty":
out = type(duck_quantity)(
np.empty_like(ufunc(quantity, duck_quantity.data))
)
out_expected = np.empty_like(ufunc(quantity, duck_quantity.data))
result = ufunc(quantity, duck_quantity, out=out)
if out is not None:
assert result is out
result_expected = ufunc(quantity, duck_quantity.data, out=out_expected)
assert np.all(result.data == result_expected)
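# The scipy.special checks below run only when scipy is installed; they verify
# that astropy's ufunc helpers are registered lazily for scipy's ufuncs and
# that the usual unit rules then apply to them.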
if HAS_SCIPY:
from scipy import special as sps
erf_like_ufuncs = (
sps.erf, sps.erfc, sps.erfcx, sps.erfi,
sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10,
) # fmt: skip
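    # erfinv/erfcinv are true ufuncs only in some scipy versions; the ufunc
    # helpers (and these tests) apply to them only when they are.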
if isinstance(sps.erfinv, np.ufunc):
erf_like_ufuncs += (sps.erfinv, sps.erfcinv)
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1.0 * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
if isinstance(sps.erfinv, np.ufunc):
assert sps.erfinv in qh.UFUNC_HELPERS
else:
assert sps.erfinv not in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize("function", erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize("function", (sps.cbrt,))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize("function", (sps.radian,))
def test_radian(self, function):
q1 = function(180.0 * u.degree, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0.0 * u.degree, 30.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q2.value, (30.0 * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0.0 * u.degree, 0.0 * u.arcmin, 30.0 * u.arcsec)
assert_allclose(q3.value, (30.0 * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3.0 * u.radian, 0.0 * u.arcmin, 0.0 * u.arcsec)
assert_allclose(q4.value, 3.0)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3.0 * u.m, 2.0 * u.s, 1.0 * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e,
) # fmt: skip
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2.0 * u.m / (2.0 * u.m), 3.0 * u.m / (6.0 * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_array(self, function):
q = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m),
)
assert q.unit == u.dimensionless_unscaled
assert np.all(
q.value == function(np.ones(3), np.array([1.0 / 3.0, 1.0 / 2.0, 1.0]))
)
# should also work on quantities that can be made dimensionless
q2 = function(
np.ones(3) * u.m / (1.0 * u.m),
np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm),
)
assert q2.unit == u.dimensionless_unscaled
assert_allclose(
q2.value,
function(np.ones(3), np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])),
)
@pytest.mark.parametrize("function", jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(
TypeError,
match=(
f"Can only apply '{function.__name__}' function to dimensionless"
" quantities"
),
):
function(1.0 * u.kg, 3.0 * u.m / u.s)
|
2300d794d0ca5166335f6d896c7f160133858585bdc5de91b07efc5449fd0601 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import typing
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy import units as u
# list of pairs (target unit/physical type, input unit)
x_inputs = [
(u.arcsec, u.deg),
("angle", u.deg),
(u.kpc / u.Myr, u.km / u.s),
("speed", u.km / u.s),
([u.arcsec, u.km], u.deg),
([u.arcsec, u.km], u.km), # multiple allowed
(["angle", "length"], u.deg),
(["angle", "length"], u.km),
]
y_inputs = [
(u.m, u.km),
(u.km, u.m),
(u.arcsec, u.deg),
("angle", u.deg),
(u.kpc / u.Myr, u.km / u.s),
("speed", u.km / u.s),
]
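# Module-scoped fixtures that run each test below once per entry in
# x_inputs / y_inputs, covering plain units, physical types, and lists of
# allowed units.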
@pytest.fixture(scope="module", params=list(range(len(x_inputs))))
def x_input(request):
return x_inputs[request.param]
@pytest.fixture(scope="module", params=list(range(len(y_inputs))))
def y_input(request):
return y_inputs[request.param]
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1 * x_unit, 1 * y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_args_nonquantity(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1 * x_unit, 100)
assert isinstance(x, u.Quantity)
assert isinstance(y, int)
assert x.unit == x_unit
def test_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(
u.UnitsError,
match=(
"Argument 'y' to function 'myfunc_args' must be in units "
f"convertible to '{str(y_target)}'."
),
):
        x, y = myfunc_args(1 * x_unit, 100 * u.Joule)  # Joule is not convertible to the target unit
def test_wrong_unit_annotated(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input
def myfunc_args(x: x_target, y: y_target):
return x, y
with pytest.raises(u.UnitsError, match="Argument 'y' to function 'myfunc_args'"):
        x, y = myfunc_args(1 * x_unit, 100 * u.Joule)  # Joule is not convertible to the target unit
def test_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y):
return x, y
with pytest.raises(
TypeError,
match=(
"Argument 'y' to function 'myfunc_args' has no 'unit' attribute. "
"You should pass in an astropy Quantity instead."
),
):
x, y = myfunc_args(1 * x_unit, 100)
def test_not_quantity_annotated(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input
def myfunc_args(x: x_target, y: y_target):
return x, y
with pytest.raises(
TypeError,
match=(
"Argument 'y' to function 'myfunc_args' has no 'unit' attribute. "
"You should pass in an astropy Quantity instead."
),
):
x, y = myfunc_args(1 * x_unit, 100)
def test_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg, y=1 * y_unit):
return x, my_arg, y
x, my_arg, y = myfunc_args(1 * x_unit, 100, y=100 * y_unit)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg, int)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
def test_unused_kwargs(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
return x, my_arg1, y, my_arg2
x, my_arg1, y, my_arg2 = myfunc_args(1 * x_unit, 100, y=100 * y_unit, my_arg2=10)
assert isinstance(x, u.Quantity)
assert isinstance(my_arg1, int)
assert isinstance(y, u.Quantity)
assert isinstance(my_arg2, int)
assert y.unit == y_unit
assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10 * y_unit):
return x, y
with pytest.raises(
u.UnitsError,
match=(
"Argument 'y' to function 'myfunc_args' must be in units "
f"convertible to '{str(y_target)}'."
),
):
x, y = myfunc_args(1 * x_unit, y=100 * u.Joule)
def test_kwarg_not_quantity(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10 * y_unit):
return x, y
with pytest.raises(
TypeError,
match=(
"Argument 'y' to function 'myfunc_args' has no 'unit' attribute. "
"You should pass in an astropy Quantity instead."
),
):
x, y = myfunc_args(1 * x_unit, y=100)
def test_kwarg_default(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=10 * y_unit):
return x, y
x, y = myfunc_args(1 * x_unit)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_input(x_input, y_input):
x_target, x_unit = x_input
y_target, y_unit = y_input
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x=1 * x_unit, y=1 * y_unit):
return x, y
kwargs = {"x": 10 * x_unit, "y": 10 * y_unit}
x, y = myfunc_args(**kwargs)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == x_unit
assert y.unit == y_unit
def test_kwargs_extra(x_input):
x_target, x_unit = x_input
@u.quantity_input(x=x_target)
def myfunc_args(x, **kwargs):
return x
x = myfunc_args(1 * x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
# ---- Tests that don't use the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [(u.arcsec, u.eV), ("angle", "energy")])
def test_arg_equivalencies(x_unit, y_unit):
@u.quantity_input(x=x_unit, y=y_unit, equivalencies=u.mass_energy())
def myfunc_args(x, y):
return x, y + (10 * u.J) # Add an energy to check equiv is working
x, y = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(y, u.Quantity)
assert x.unit == u.arcsec
assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies(x_unit, energy_unit):
@u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
def myfunc_args(x, energy=10 * u.eV):
return x, energy + (10 * u.J) # Add an energy to check equiv is working
x, energy = myfunc_args(1 * u.arcsec, 100 * u.gram)
assert isinstance(x, u.Quantity)
assert isinstance(energy, u.Quantity)
assert x.unit == u.arcsec
assert energy.unit == u.gram
def test_no_equivalent():
class test_unit:
pass
class test_quantity:
unit = test_unit()
@u.quantity_input(x=u.arcsec)
def myfunc_args(x):
return x
with pytest.raises(
TypeError,
match=(
"Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an"
" 'is_equivalent' method. You should pass in an astropy Quantity instead."
),
):
x, y = myfunc_args(test_quantity())
def test_kwarg_invalid_physical_type():
@u.quantity_input(x="angle", y="africanswallow")
def myfunc_args(x, y=10 * u.deg):
return x, y
with pytest.raises(
ValueError, match="Invalid unit or physical type 'africanswallow'."
):
x, y = myfunc_args(1 * u.arcsec, y=100 * u.deg)
def test_default_value_check():
x_target = u.deg
x_unit = u.arcsec
with pytest.raises(TypeError):
@u.quantity_input(x=x_target)
def myfunc_args(x=1.0):
return x
x = myfunc_args()
x = myfunc_args(1 * x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
def test_str_unit_typo():
@u.quantity_input
def myfunc_args(x: "kilograam"):
return x
with pytest.raises(ValueError):
result = myfunc_args(u.kg)
class TestTypeAnnotations:
@pytest.mark.parametrize(
"annot",
[u.m, u.Quantity[u.m], u.Quantity[u.m, "more"]],
) # Note: parametrization is done even if test class is skipped
def test_single_annotation_unit(self, annot):
"""Try a variety of valid annotations."""
@u.quantity_input
def myfunc_args(x: annot, y: str):
return x, y
i_q, i_str = 2 * u.m, "cool string"
o_q, o_str = myfunc_args(i_q, i_str)
assert o_q == i_q
assert o_str == i_str
def test_args_None():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
y_unit = u.kpc
@u.quantity_input(x=[x_target, None], y=[None, y_target])
def myfunc_args(x, y):
return x, y
x, y = myfunc_args(1 * x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(None, 1 * y_unit)
assert isinstance(y, u.Quantity)
assert y.unit == y_unit
assert x is None
def test_args_None_kwarg():
x_target = u.deg
x_unit = u.arcsec
y_target = u.km
@u.quantity_input(x=x_target, y=y_target)
def myfunc_args(x, y=None):
return x, y
x, y = myfunc_args(1 * x_unit)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
x, y = myfunc_args(1 * x_unit, None)
assert isinstance(x, u.Quantity)
assert x.unit == x_unit
assert y is None
with pytest.raises(TypeError):
x, y = myfunc_args(None, None)
@pytest.mark.parametrize("val", [1.0, 1, np.arange(10), np.arange(10.0)])
def test_allow_dimensionless_numeric(val):
"""
When dimensionless_unscaled is an allowed unit, numbers and numeric numpy
arrays are allowed through
"""
@u.quantity_input(velocity=[u.km / u.s, u.dimensionless_unscaled])
def myfunc(velocity):
return velocity
assert np.all(myfunc(val) == val)
@pytest.mark.parametrize("val", [1.0, 1, np.arange(10), np.arange(10.0)])
def test_allow_dimensionless_numeric_strict(val):
"""
When dimensionless_unscaled is an allowed unit, but we are being strict,
don't allow numbers and numeric numpy arrays through
"""
@u.quantity_input(
velocity=[u.km / u.s, u.dimensionless_unscaled], strict_dimensionless=True
)
def myfunc(velocity):
return velocity
with pytest.raises(TypeError):
assert myfunc(val)
@pytest.mark.parametrize("val", [1 * u.deg, [1, 2, 3] * u.m])
def test_dimensionless_with_nondimensionless_input(val):
"""
When dimensionless_unscaled is the only allowed unit, don't let input with
non-dimensionless units through
"""
@u.quantity_input(x=u.dimensionless_unscaled)
def myfunc(x):
return x
with pytest.raises(u.UnitsError):
myfunc(val)
def test_annotated_not_quantity():
"""Test when annotation looks like a Quantity[X], but isn't."""
@u.quantity_input()
def myfunc(x: typing.Annotated[object, u.m]):
return x
# nothing happens when wrong unit is passed
assert myfunc(1) == 1
assert myfunc(1 * u.m) == 1 * u.m
assert myfunc(1 * u.s) == 1 * u.s
def test_annotated_not_unit():
"""Test when annotation looks like a Quantity[X], but the unit's wrong."""
@u.quantity_input()
def myfunc(x: typing.Annotated[u.Quantity, object()]):
return x
# nothing happens when wrong unit is passed
assert myfunc(1) == 1
assert myfunc(1 * u.m) == 1 * u.m
assert myfunc(1 * u.s) == 1 * u.s
|
92bd8a587f6c79692c4341408371bb888a5691f22c39b4776d2ec408a10676a1 | """
Tests the Angle string formatting capabilities. SkyCoord formatting is in
test_sky_coord
"""
import pytest
from astropy import units as u
from astropy.coordinates.angles import Angle
def test_to_string_precision():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1319 which caused incorrect formatting of the
# seconds for precision=0
angle = Angle(-1.23456789, unit=u.degree)
assert angle.to_string(precision=3) == "-1d14m04.444s"
assert angle.to_string(precision=1) == "-1d14m04.4s"
assert angle.to_string(precision=0) == "-1d14m04s"
angle2 = Angle(-1.23456789, unit=u.hourangle)
assert angle2.to_string(precision=3, unit=u.hour) == "-1h14m04.444s"
assert angle2.to_string(precision=1, unit=u.hour) == "-1h14m04.4s"
assert angle2.to_string(precision=0, unit=u.hour) == "-1h14m04s"
# Regression test for #7141
angle3 = Angle(-0.5, unit=u.degree)
assert angle3.to_string(precision=0, fields=3) == "-0d30m00s"
assert angle3.to_string(precision=0, fields=2) == "-0d30m"
assert angle3.to_string(precision=0, fields=1) == "-1d"
def test_to_string_decimal():
# There are already some tests in test_api.py, but this is a regression
# test for the bug in issue #1323 which caused decimal formatting to not
# work
angle1 = Angle(2.0, unit=u.degree)
assert angle1.to_string(decimal=True, precision=3) == "2.000"
assert angle1.to_string(decimal=True, precision=1) == "2.0"
assert angle1.to_string(decimal=True, precision=0) == "2"
angle2 = Angle(3.0, unit=u.hourangle)
assert angle2.to_string(decimal=True, precision=3) == "3.000"
assert angle2.to_string(decimal=True, precision=1) == "3.0"
assert angle2.to_string(decimal=True, precision=0) == "3"
angle3 = Angle(4.0, unit=u.radian)
assert angle3.to_string(decimal=True, precision=3) == "4.000"
assert angle3.to_string(decimal=True, precision=1) == "4.0"
assert angle3.to_string(decimal=True, precision=0) == "4"
with pytest.raises(ValueError, match="sexagesimal notation"):
angle3.to_string(decimal=True, sep="abc")
def test_to_string_formats():
a = Angle(1.113355, unit=u.deg)
latex_str = r"$1^\circ06{}^\prime48.078{}^{\prime\prime}$"
assert a.to_string(format="latex") == latex_str
assert a.to_string(format="latex_inline") == latex_str
assert a.to_string(format="unicode") == "1°06′48.078″"
a = Angle(1.113355, unit=u.hour)
latex_str = r"$1^{\mathrm{h}}06^{\mathrm{m}}48.078^{\mathrm{s}}$"
assert a.to_string(format="latex") == latex_str
assert a.to_string(format="latex_inline") == latex_str
assert a.to_string(format="unicode") == "1ʰ06ᵐ48.078ˢ"
a = Angle(1.113355, unit=u.radian)
assert a.to_string(format="latex") == r"$1.11336\;\mathrm{rad}$"
assert a.to_string(format="latex_inline") == r"$1.11336\;\mathrm{rad}$"
assert a.to_string(format="unicode") == "1.11336 rad"
def test_to_string_decimal_formats():
angle1 = Angle(2.0, unit=u.degree)
assert angle1.to_string(decimal=True, format="generic") == "2 deg"
assert angle1.to_string(decimal=True, format="latex") == "$2\\mathrm{{}^{\\circ}}$"
assert angle1.to_string(decimal=True, format="unicode") == "2°"
angle2 = Angle(3.0, unit=u.hourangle)
assert angle2.to_string(decimal=True, format="generic") == "3 hourangle"
assert angle2.to_string(decimal=True, format="latex") == "$3\\mathrm{{}^{h}}$"
assert angle2.to_string(decimal=True, format="unicode") == "3ʰ"
angle3 = Angle(4.0, unit=u.radian)
assert angle3.to_string(decimal=True, format="generic") == "4 rad"
assert angle3.to_string(decimal=True, format="latex") == "$4\\;\\mathrm{rad}$"
assert angle3.to_string(decimal=True, format="unicode") == "4 rad"
with pytest.raises(ValueError, match="Unknown format"):
angle3.to_string(decimal=True, format="myformat")
def test_to_string_fields():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=1) == r"1d"
assert a.to_string(fields=2) == r"1d07m"
assert a.to_string(fields=3) == r"1d06m48.078s"
def test_to_string_padding():
a = Angle(0.5653, unit=u.deg)
assert a.to_string(unit="deg", sep=":", pad=True) == r"00:33:55.08"
# Test to make sure negative angles are padded correctly
a = Angle(-0.5653, unit=u.deg)
assert a.to_string(unit="deg", sep=":", pad=True) == r"-00:33:55.08"
def test_sexagesimal_rounding_up():
a = Angle(359.999999999999, unit=u.deg)
assert a.to_string(precision=None) == "360d00m00s"
assert a.to_string(precision=4) == "360d00m00.0000s"
assert a.to_string(precision=5) == "360d00m00.00000s"
assert a.to_string(precision=6) == "360d00m00.000000s"
assert a.to_string(precision=7) == "360d00m00.0000000s"
assert a.to_string(precision=8) == "360d00m00.00000000s"
assert a.to_string(precision=9) == "359d59m59.999999996s"
a = Angle(3.999999, unit=u.deg)
assert a.to_string(fields=2, precision=None) == "4d00m"
assert a.to_string(fields=2, precision=1) == "4d00m"
assert a.to_string(fields=2, precision=5) == "4d00m"
assert a.to_string(fields=1, precision=1) == "4d"
assert a.to_string(fields=1, precision=5) == "4d"
def test_to_string_scalar():
a = Angle(1.113355, unit=u.deg)
assert isinstance(a.to_string(), str)
def test_to_string_radian_with_precision():
"""
Regression test for a bug that caused ``to_string`` to crash for angles in
radians when specifying the precision.
"""
# Check that specifying the precision works
a = Angle(3.0, unit=u.rad)
assert a.to_string(precision=3, sep="fromunit") == "3.000 rad"
def test_sexagesimal_round_down():
a1 = Angle(1, u.deg).to(u.hourangle)
a2 = Angle(2, u.deg)
assert a1.to_string() == "0h04m00s"
assert a2.to_string() == "2d00m00s"
def test_to_string_fields_colon():
a = Angle(1.113355, unit=u.deg)
assert a.to_string(fields=2, sep=":") == "1:07"
assert a.to_string(fields=3, sep=":") == "1:06:48.078"
assert a.to_string(fields=1, sep=":") == "1"
|
df65f87a3b832d2f61d50bedef02e14ac4ba93f8a7eb85d11cd12a188d40e4cf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization and other aspects of Angle and subclasses"""
import pickle
import threading
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Angle, Latitude, Longitude
from astropy.coordinates.errors import (
IllegalHourError,
IllegalMinuteError,
IllegalMinuteWarning,
IllegalSecondError,
IllegalSecondWarning,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
""" The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
    Units *must* be specified rather than assuming a default value. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle."""
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a8 = Angle("54°07'26.832\"")
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hour"):
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hour"):
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit="hour")
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
a25 = Angle(3.0, unit=u.hour**1)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
assert a24 == a25
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
def test_angle_from_view():
q = np.arange(3.0) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
    # Angle objects will return a Quantity. An exception will be raised if an
    # attempt is made to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45.0, u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.0 * u.deg
assert type(a8) is Angle
a9 = 1.0 * u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
# A few tests here which caused problems before: #8368
a = Angle([0.0, 2.0], "deg")
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1.0 * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1.0 * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1.0 * u.degree**2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2.0 * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2.0 * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0.0 * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
"""
The string method of Angle has this signature:
def string(self, unit=DEGREE, decimal=False, sep=" ", precision=5,
pad=False):
The "decimal" parameter defaults to False since if you need to print the
Angle as a decimal, there's no need to use the "format" method (see
above).
"""
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = "Angle as HMS: 3h36m29.7888s"
assert f"Angle as HMS: {angle.to_string(unit=u.hour)}" == res
res = "Angle as HMS: 3:36:29.7888"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':')}" == res
res = "Angle as HMS: 3:36:29.79"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep=':', precision=2)}" == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = "Angle as HMS: 3h36m29.7888s"
assert (
"Angle as HMS:"
f" {angle.to_string(unit=u.hour, sep=('h', 'm', 's'), precision=4)}" == res
)
res = "Angle as HMS: 3-36|29.7888"
assert (
f"Angle as HMS: {angle.to_string(unit=u.hour, sep=['-', '|'], precision=4)}"
== res
)
res = "Angle as HMS: 3-36-29.7888"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, sep='-', precision=4)}" == res
res = "Angle as HMS: 03h36m29.7888s"
assert f"Angle as HMS: {angle.to_string(unit=u.hour, precision=4, pad=True)}" == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = "Angle as DMS: 3d36m29.7888s"
assert f"Angle as DMS: {angle.to_string(unit=u.degree)}" == res
res = "Angle as DMS: 3:36:29.7888"
assert f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':')}" == res
res = "Angle as DMS: 3:36:29.79"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep=':', precision=2)}" == res
)
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = "Angle as DMS: 3d36m29.7888s"
assert (
f"Angle as DMS: {angle.to_string(unit=u.deg, sep=('d', 'm', 's'), precision=4)}"
== res
)
res = "Angle as DMS: 3-36|29.7888"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep=['-', '|'], precision=4)}"
== res
)
res = "Angle as DMS: 3-36-29.7888"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, sep='-', precision=4)}" == res
)
res = "Angle as DMS: 03d36m29.7888s"
assert (
f"Angle as DMS: {angle.to_string(unit=u.degree, precision=4, pad=True)}" == res
)
res = "Angle as rad: 0.0629763 rad"
assert f"Angle as rad: {angle.to_string(unit=u.radian)}" == res
res = "Angle as rad decimal: 0.0629763"
assert (
f"Angle as rad decimal: {angle.to_string(unit=u.radian, decimal=True)}" == res
)
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == "-1d14m04.444404s"
assert angle.to_string(pad=True) == "-01d14m04.444404s"
assert angle.to_string(unit=u.hour) == "-0h04m56.2962936s"
assert angle2.to_string(unit=u.hour, pad=True) == "-01h14m04.444404s"
assert angle.to_string(unit=u.radian, decimal=True) == "-0.0215473"
# We should recognize units that are equal but not identical
assert angle.to_string(unit=u.hour**1) == "-0h04m56.2962936s"
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert (
Angle([1.0 / 7.0, 1.0 / 7.0], unit="deg").to_string()[0] == "0d08m34.28571429s"
)
assert Angle([1.0 / 7.0], unit="deg").to_string()[0] == "0d08m34.28571429s"
assert Angle(1.0 / 7.0, unit="deg").to_string() == "0d08m34.28571429s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle("1d2m3.4s")
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude("1h2m3.4s")
dec = Latitude("1d2m3.4s")
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
"""
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
are so prevalent in astronomy that it's worth creating ones for these
    units. They will be noted as "special" in the docs, and the plain
    Angle class should be used for other coordinate systems.
"""
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
ra = Longitude((12, 14, 52), unit=u.hour)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
    # While Longitude values are commonly found in hours or degrees, declination is
    # nearly always specified in degrees, so degrees is the default.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle("-00:00:10", u.deg)
assert_allclose(a.degree, -10.0 / 3600.0)
# Unicode minus
a = Angle("−00:00:10", u.deg)
assert_allclose(a.degree, -10.0 / 3600.0)
def test_negative_zero_dm():
# Test for DM parser
a = Angle("-00:10", u.deg)
assert_allclose(a.degree, -10.0 / 60.0)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle("-00:00:10", u.hour)
assert_allclose(a.hour, -10.0 / 3600.0)
def test_negative_zero_hm():
# Test for HM parser
a = Angle("-00:10", u.hour)
assert_allclose(a.hour, -10.0 / 60.0)
def test_negative_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle("-00:60", u.hour)
assert_allclose(a.hour, -1.0)
def test_plus_sixty_hm():
# Test for HM parser
with pytest.warns(IllegalMinuteWarning):
a = Angle("00:60", u.hour)
assert_allclose(a.hour, 1.0)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("-00:59:60", u.deg)
assert_allclose(a.degree, -1.0)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("+00:59:60", u.deg)
assert_allclose(a.degree, 1.0)
def test_negative_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("-00:00:60", u.deg)
assert_allclose(a.degree, -1.0 / 60.0)
def test_plus_sixty_dms():
# Test for DMS parser
with pytest.warns(IllegalSecondWarning):
a = Angle("+00:00:60", u.deg)
assert_allclose(a.degree, 1.0 / 60.0)
def test_angle_to_is_angle():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0 * u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0 * u.meter)
a = Angle(1.0 * u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0 * u.min)
def test_angle_string():
with pytest.warns(IllegalSecondWarning):
a = Angle("00:00:60", u.deg)
assert str(a) == "0d01m00s"
a = Angle("00:00:59S", u.deg)
assert str(a) == "-0d00m59s"
a = Angle("00:00:59N", u.deg)
assert str(a) == "0d00m59s"
a = Angle("00:00:59E", u.deg)
assert str(a) == "0d00m59s"
a = Angle("00:00:59W", u.deg)
assert str(a) == "-0d00m59s"
a = Angle("-00:00:10", u.hour)
assert str(a) == "-0h00m10s"
a = Angle("00:00:59E", u.hour)
assert str(a) == "0h00m59s"
a = Angle("00:00:59W", u.hour)
assert str(a) == "-0h00m59s"
a = Angle(3.2, u.radian)
assert str(a) == "3.2 rad"
a = Angle(4.2, u.microarcsecond)
assert str(a) == "4.2 uarcsec"
a = Angle("1.0uarcsec")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecN")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecS")
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecE")
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("1.0uarcsecW")
assert a.value == -1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle("3dN")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dS")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle("3dE")
assert str(a) == "3d00m00s"
assert a.unit == u.degree
a = Angle("3dW")
assert str(a) == "-3d00m00s"
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'N")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'S")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("10'E")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
a = Angle("10'W")
assert_allclose(a.value, -10.0)
assert a.unit == u.arcminute
a = Angle("45°55′12″N")
assert str(a) == "45d55m12s"
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle("45°55′12″S")
assert str(a) == "-45d55m12s"
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
a = Angle("45°55′12″E")
assert str(a) == "45d55m12s"
assert_allclose(a.value, 45.92)
assert a.unit == u.deg
a = Angle("45°55′12″W")
assert str(a) == "-45d55m12s"
assert_allclose(a.value, -45.92)
assert a.unit == u.deg
with pytest.raises(ValueError):
Angle("00h00m10sN")
with pytest.raises(ValueError):
Angle("45°55′12″NS")
def test_angle_repr():
assert "Angle" in repr(Angle(0, u.deg))
assert "Longitude" in repr(Longitude(0, u.deg))
assert "Latitude" in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at("180d", inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20.0, 150.0, -10.0, 0.0]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(["91d", "89d"])
with pytest.raises(ValueError):
lat = Latitude("-91d")
lat = Latitude(["90d", "89d"])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(["90d", "89d"]))
# check setitem works
lat[1] = 45.0 * u.deg
assert np.all(lat == Angle(["90d", "45d"]))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(["90d", "45d"]))
# conserve type on unit change (closes #1423)
angle = lat.to("radian")
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude("80d")
angle = lat / 2.0
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.0
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(
TypeError, match="A Latitude angle cannot be created from a Longitude angle"
):
lon = Longitude(10, "deg")
lat = Latitude(lon)
with pytest.raises(
TypeError, match="A Longitude angle cannot be assigned to a Latitude angle"
):
lon = Longitude(10, "deg")
lat = Latitude([20], "deg")
lat[0] = lon
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, "deg")
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, "deg")
lat = Latitude([20], "deg")
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(["370d", "88d"])
assert np.all(lon == Longitude(["10d", "88d"]))
assert np.all(lon == Angle(["10d", "88d"]))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to("hourangle")
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.0
assert np.all(angle == Angle(["5d", "44d"]))
assert type(angle) is Angle
assert not hasattr(angle, "wrap_angle")
angle = lon * 2.0 + 400 * u.deg
assert np.all(angle == Angle(["420d", "576d"]))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(["10d", "350d"]))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0.0, 90, 180, 270, 0]))
lon = Longitude(
np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle="180d"
)
assert np.all(lon.degree == np.array([0.0, 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = "180d"
assert np.all(lon.degree == np.array([0.0, 90, -180, -90, 0]))
lon = Longitude("460d")
assert lon == Angle("100d")
lon.wrap_angle = "90d"
assert lon == Angle("-260d")
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle="180d")
assert lon3.wrap_angle == 180 * u.deg
# check that wrap_angle is always an Angle
lon = Longitude(lon, wrap_angle=Longitude(180 * u.deg))
assert lon.wrap_angle == 180 * u.deg
assert lon.wrap_angle.__class__ is Angle
# check that wrap_angle is not copied
wrap_angle = 180 * u.deg
lon = Longitude(lon, wrap_angle=wrap_angle)
assert lon.wrap_angle == 180 * u.deg
assert np.may_share_memory(lon.wrap_angle, wrap_angle)
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith("-")
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(
TypeError, match="A Longitude angle cannot be created from a Latitude angle"
):
lat = Latitude(10, "deg")
lon = Longitude(lat)
with pytest.raises(
TypeError, match="A Latitude angle cannot be assigned to a Longitude angle"
):
lat = Latitude(10, "deg")
lon = Longitude([20], "deg")
lon[0] = lat
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, "deg")
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, "deg")
lon = Longitude([20], "deg")
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340.0, 150.0, 350.0, 0.0]))
assert np.all(
a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340.0, 150.0, 350.0, 0.0])
)
assert np.all(a.wrap_at("360d").degree == np.array([340.0, 150.0, 350.0, 0.0]))
assert np.all(a.wrap_at("180d").degree == np.array([-20.0, 150.0, -10.0, 0.0]))
assert np.all(
a.wrap_at(np.pi * u.rad).degree == np.array([-20.0, 150.0, -10.0, 0.0])
)
# Test wrapping a scalar Angle
a = Angle("190d")
assert a.wrap_at("180d") == Angle("-170d")
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds("0d", "360d") is False
assert a.is_within_bounds(None, "360d") is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle("-20d")
assert a.is_within_bounds("0d", "360d") is False
assert a.is_within_bounds(None, "360d") is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
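    # The string is in hour-angle format, so it is parsed as 6h07m08s and then
    # converted to the requested degrees.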
a = Angle("+6h7m8s", unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0.0, unit="deg").to_string() == "-0d00m00s"
assert Angle(-1.0, unit="deg").to_string() == "-1d00m00s"
assert Angle(-0.0, unit="hour").to_string() == "-0h00m00s"
assert Angle(-1.0, unit="hour").to_string() == "-1h00m00s"
def test_regression_formatting_default_precision():
# Regression test for issue #11140
assert Angle("10:20:30.12345678d").to_string() == "10d20m30.12345678s"
assert Angle("10d20m30.123456784564s").to_string() == "10d20m30.12345678s"
assert Angle("10d20m30.123s").to_string() == "10d20m30.123s"
def test_empty_sep():
a = Angle("05h04m31.93830s")
assert a.to_string(sep="", precision=2, pad=True) == "050431.94"
def test_create_tuple():
"""
Tests creation of an angle with an (h,m,s) tuple
(d, m, s) tuples are not tested because of sign ambiguity issues (#13162)
"""
with pytest.warns(AstropyDeprecationWarning, match="hms_to_hours"):
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1 * u.deg, 1 * u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1 * u.hourangle, 1 * u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [0.25, 0.4, 0.5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(["1d", 1.0 * u.deg])
assert_array_equal(a1.value, [1.0, 1.0])
assert a1.unit == u.deg
a2 = Angle(["1d", 1 * u.rad * np.pi, "3d"])
assert_array_equal(a2.value, [1.0, 180.0, 3.0])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == "U"
assert np.all(aobj.to_string() == ["1d00m00s", "2d00m00s"])
def test_wrap_at_without_new():
"""
Regression test for subtle bugs from situations where an Angle is
created via numpy channels that don't do the standard __new__ but instead
    depend on __array_finalize__ to set state. Longitude is used because the
    bug was in its _wrap_angle not getting initialized correctly.
"""
l1 = Longitude([1] * u.deg)
l2 = Longitude([2] * u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle("10.2345d")
strscangle = scangle.__str__()
assert strscangle == "10d14m04.2s"
# non-scalar array angles
arrangle = Angle(["10.2345d", "-20d"])
strarrangle = arrangle.__str__()
assert strarrangle == "[10d14m04.2s -20d00m00s]"
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert "..." in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r"$2^\circ06{}^\prime00{}^{\prime\prime}$"
assert rlscangle.split("$")[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000) / 50000.0, u.deg)
assert "..." in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
# the problem is with the parser, so remove it temporarily
from astropy.coordinates.angle_formats import _AngleParser
from astropy.units import cds
del _AngleParser._thread_local._parser
with cds.enable():
Angle("5d")
del _AngleParser._thread_local._parser
Angle("5d")
def test_longitude_nan():
# Check that passing a NaN to Longitude doesn't raise a warning
Longitude([0, np.nan, 1] * u.deg)
def test_latitude_nan():
# Check that passing a NaN to Latitude doesn't raise a warning
Latitude([0, np.nan, 1] * u.deg)
def test_angle_wrap_at_nan():
# Check that no attempt is made to wrap a NaN angle
angle = Angle([0, np.nan, 1] * u.deg)
angle.flags.writeable = False # to force an error if a write is attempted
angle.wrap_at(180 * u.deg, inplace=True)
def test_angle_multithreading():
"""
Regression test for issue #7168
"""
angles = ["00:00:00"] * 10000
def parse_test(i=0):
Angle(angles, unit="hour")
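    # Start several threads parsing concurrently to check that the angle
    # string parser is thread-safe.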
for i in range(10):
threading.Thread(target=parse_test, args=(i,)).start()
@pytest.mark.parametrize("cls", [Angle, Longitude, Latitude])
@pytest.mark.parametrize(
"input, expstr, exprepr",
[
(np.nan * u.deg, "nan", "nan deg"),
([np.nan, 5, 0] * u.deg, "[nan 5d00m00s 0d00m00s]", "[nan, 5., 0.] deg"),
([6, np.nan, 0] * u.deg, "[6d00m00s nan 0d00m00s]", "[6., nan, 0.] deg"),
([np.nan, np.nan, np.nan] * u.deg, "[nan nan nan]", "[nan, nan, nan] deg"),
(np.nan * u.hour, "nan", "nan hourangle"),
([np.nan, 5, 0] * u.hour, "[nan 5h00m00s 0h00m00s]", "[nan, 5., 0.] hourangle"),
([6, np.nan, 0] * u.hour, "[6h00m00s nan 0h00m00s]", "[6., nan, 0.] hourangle"),
(
[np.nan, np.nan, np.nan] * u.hour,
"[nan nan nan]",
"[nan, nan, nan] hourangle",
),
(np.nan * u.rad, "nan", "nan rad"),
([np.nan, 1, 0] * u.rad, "[nan 1 rad 0 rad]", "[nan, 1., 0.] rad"),
([1.50, np.nan, 0] * u.rad, "[1.5 rad nan 0 rad]", "[1.5, nan, 0.] rad"),
([np.nan, np.nan, np.nan] * u.rad, "[nan nan nan]", "[nan, nan, nan] rad"),
],
)
def test_str_repr_angles_nan(cls, input, expstr, exprepr):
"""
Regression test for issue #11473
"""
q = cls(input)
assert str(q) == expstr
    # Strip whitespace, since repr appears to add it for some values, which
    # would otherwise make the comparison fail.
assert repr(q).replace(" ", "") == f"<{cls.__name__}{exprepr}>".replace(" ", "")
@pytest.mark.parametrize("sign", (-1, 1))
@pytest.mark.parametrize(
"value,expected_value,dtype,expected_dtype",
[
(np.pi / 2, np.pi / 2, None, np.float64),
(np.pi / 2, np.pi / 2, np.float64, np.float64),
(np.float32(np.pi / 2), np.float32(np.pi / 2), None, np.float32),
(np.float32(np.pi / 2), np.float32(np.pi / 2), np.float32, np.float32),
# these cases would require coercing the float32 value to the float64 value
# making validate have side effects, so it's not implemented for now
# (np.float32(np.pi / 2), np.pi / 2, np.float64, np.float64),
# (np.float32(-np.pi / 2), -np.pi / 2, np.float64, np.float64),
],
)
def test_latitude_limits(value, expected_value, dtype, expected_dtype, sign):
"""
Test that the validation of the Latitude value range in radians works
in both float32 and float64.
As discussed in issue #13708, before, the float32 representation of pi/2
was rejected as invalid because the comparison always used the float64
representation.
"""
# this prevents upcasting to float64 as sign * value would do
if sign < 0:
value = -value
expected_value = -expected_value
result = Latitude(value, u.rad, dtype=dtype)
assert result.value == expected_value
assert result.dtype == expected_dtype
assert result.unit == u.rad
@pytest.mark.parametrize(
"value,dtype",
[
(0.50001 * np.pi, np.float32),
(np.float32(0.50001 * np.pi), np.float32),
(0.50001 * np.pi, np.float64),
],
)
def test_latitude_out_of_limits(value, dtype):
"""
Test that values slightly larger than pi/2 are rejected for different dtypes.
Test cases for issue #13708
"""
with pytest.raises(ValueError, match=r"Latitude angle\(s\) must be within.*"):
Latitude(value, u.rad, dtype=dtype)
def test_angle_pickle_to_string():
"""
Ensure that after pickling we can still do to_string on hourangle.
Regression test for gh-13923.
"""
angle = Angle(0.25 * u.hourangle)
expected = angle.to_string()
via_pickle = pickle.loads(pickle.dumps(angle))
via_pickle_string = via_pickle.to_string() # This used to fail.
assert via_pickle_string == expected
|
ffacf702ceb065f0e7b79956d6f1d42c84c039fa314b72e85502e9a92cfaeb65 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import numbers
import operator
import re
import sys
import warnings
import weakref
from collections import OrderedDict
from contextlib import suppress
from functools import reduce
import numpy as np
from numpy import char as chararray
from astropy.utils import indent, isiterable, lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from .card import CARD_LENGTH, Card
from .util import NotifierMixin, _convert_array, _is_int, cmp, encode_ascii, pairwise
from .verify import VerifyError, VerifyWarning
__all__ = ["Column", "ColDefs", "Delayed"]
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {
"L": "i1",
"B": "u1",
"I": "i2",
"J": "i4",
"K": "i8",
"E": "f4",
"D": "f8",
"C": "c8",
"M": "c16",
"A": "a",
}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS["b1"] = "L"
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS["u2"] = "I"
NUMPY2FITS["u4"] = "J"
NUMPY2FITS["u8"] = "K"
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS["f2"] = "E"
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ["L", "B", "I", "J", "K", "D", "M", "A"]
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {"E": "D", "C": "M"}
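# NOTE: hypothetical, editor-added sketch; it is not part of the astropy.io.fits
# API and is never called. It only illustrates how the TFORM <-> numpy mappings
# and the up-conversion table defined above relate to one another.
def _example_tform_mappings():
    assert FITS2NUMPY["J"] == "i4"  # 32-bit integer TFORM code
    assert NUMPY2FITS["i4"] == "J"  # the inverse mapping recovers the code
    assert NUMPY2FITS["f2"] == "E"  # half precision is widened to single
    assert FITSUPCONVERTERS["E"] == "D"  # singles are up-converted to doubles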
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {"A": "a", "I": "i4", "J": "i8", "F": "f8", "E": "f8", "D": "f8"}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {"A": "", "I": "d", "J": "d", "F": "f", "E": "E", "D": "E"}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {
"A": (1, 0),
"I": (10, 0),
"J": (15, 0),
"E": (15, 7),
"F": (16, 7),
"D": (25, 17),
}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT["F"] = re.compile(
r"(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}(?P<precision>[0-9])+)+)|"
)
TDISP_RE_DICT["A"] = TDISP_RE_DICT["L"] = re.compile(
r"(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|"
)
TDISP_RE_DICT["I"] = TDISP_RE_DICT["B"] = TDISP_RE_DICT["O"] = TDISP_RE_DICT[
"Z"
] = re.compile(
r"(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)"
r"(?:\.{0,1}(?P<precision>[0-9]+))?))|"
)
TDISP_RE_DICT["E"] = TDISP_RE_DICT["G"] = TDISP_RE_DICT["D"] = re.compile(
r"(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\."
r"(?P<precision>[0-9]+))+)"
r"(?:E{0,1}(?P<exponential>[0-9]+)?)|"
)
TDISP_RE_DICT["EN"] = TDISP_RE_DICT["ES"] = re.compile(
r"(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}(?P<precision>[0-9])+)+)"
)
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding beforehand without
# knowing the value being formatted, so grabbing precision and using that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering fortran format, exponent a multiple of three)
# ES: Float (scientific, same as EN but with a non-zero leading digit)
# E: Float, exponential notation
# Can't get exponential restriction to work without knowing value
# beforehand, so just using width and precision, same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
"I": "{{:{width}d}}",
"B": "{{:{width}b}}",
"O": "{{:{width}o}}",
"Z": "{{:{width}x}}",
"F": "{{:{width}.{precision}f}}",
"G": "{{:{width}.{precision}g}}",
}
TDISP_FMT_DICT["A"] = TDISP_FMT_DICT["L"] = "{{:>{width}}}"
TDISP_FMT_DICT["E"] = TDISP_FMT_DICT["D"] = TDISP_FMT_DICT["EN"] = TDISP_FMT_DICT[
"ES"
] = "{{:{width}.{precision}e}}"
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = (
"TTYPE",
"TFORM",
"TUNIT",
"TNULL",
"TSCAL",
"TZERO",
"TDISP",
"TBCOL",
"TDIM",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
)
KEYWORD_ATTRIBUTES = (
"name",
"format",
"unit",
"null",
"bscale",
"bzero",
"disp",
"start",
"dim",
"coord_type",
"coord_unit",
"coord_ref_point",
"coord_ref_value",
"coord_inc",
"time_ref_pos",
)
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
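# NOTE: hypothetical, editor-added sketch; not part of the astropy.io.fits API and
# never called. It illustrates the one-to-one keyword/attribute pairing built above.
def _example_keyword_attribute_mapping():
    assert KEYWORD_TO_ATTRIBUTE["TTYPE"] == "name"
    assert KEYWORD_TO_ATTRIBUTE["TBCOL"] == "start"
    assert ATTRIBUTE_TO_KEYWORD["dim"] == "TDIM"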
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(
r"(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])(?P<option>[!-~]*)", re.I
)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(
r"(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|"
r"(?:(?P<formatf>[FED])"
r"(?:(?P<widthf>[0-9]+)(?:\."
r"(?P<precision>[0-9]+))?)?)"
)
TTYPE_RE = re.compile(r"[0-9a-zA-Z_]+")
"""
Regular expression for valid table column names. See FITS Standard v3.0 section 7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r"(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)")
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r"\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*")
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = "---"
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ("P", "Q"):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == "P":
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ""
else:
repeat = str(self.repeat)
return f"{repeat}{self.format}{self.option}"
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
    effectively any integer that will fit in a FITS column. Whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible and may result in a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = _parse_ascii_tformat(format, strict)
# If no width has been specified, set the dtype here to default as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == "L":
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ("E", "F", "D"):
return f"{self.format}{self.width}.{self.precision}"
return f"{self.format}{self.width}"
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + "u1")
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f"{self.repeat}X"
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (
r"(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])(?:\((?P<max>\d*)\))?"
)
_format_code = "P"
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = "2i4"
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group("dtype") not in FITS2NUMPY:
raise VerifyError(f"Invalid column format: {format}")
repeat = m.group("repeat")
array_dtype = m.group("dtype")
max = m.group("max")
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = "" if self.repeat is None else self.repeat
max = "" if self.max is None else self.max
return f"{repeat}{self._format_code}{self.format}({max})"
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = "Q"
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = "2i8"
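# NOTE: hypothetical, editor-added sketch; not part of the astropy.io.fits API and
# never called. It illustrates the P/Q variable-length formats: the parsed element
# type and maximum, the round-tripped TFORM, and the wider Q descriptor.
def _example_variable_length_formats():
    fmt = _FormatP.from_tform("PJ(100)")
    assert fmt.dtype == "i4" and fmt.max == "100"
    assert fmt.tform == "PJ(100)"
    assert _FormatP._descriptor_format == "2i4"
    assert _FormatQ._descriptor_format == "2i8"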
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = "_" + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify("column_attribute_changed", obj, self._attr[1:], old_value, value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(
self,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
array=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Construct a `Column` by specifying attributes. All attributes
        except ``format`` are optional; see :ref:`astropy:column_creation`
and :ref:`astropy:creating_ascii_table` for more information regarding
``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
be the *physical* values--that is, the values of column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
"""
if format is None:
raise ValueError("Must specify format to construct Column.")
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {"ascii": ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ["The following keyword arguments to Column were invalid:"]
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError("\n".join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs["recformat"]
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, convert it to one; input
        # arrays may be plain lists or tuples and are not required to be
        # ndarrays. Object arrays are excluded because there is no guarantee
        # that the elements in an object array are consistent.
if not isinstance(array, (np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError(
f"Data is inconsistent with the format `{format}`."
)
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ""
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + " = " + repr(value) + "; "
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
        # This way each column's .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector,
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if "array" in self.__dict__:
return self.__dict__["array"]
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if hasattr(base, "_coldefs") and isinstance(base._coldefs, ColDefs):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if "array" in self.__dict__:
del self.__dict__["array"]
return
if getattr(base, "base", None) is not None:
base = base.base
else:
break
self.__dict__["array"] = array
@array.deleter
def array(self):
try:
del self.__dict__["array"]
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute("TTYPE")
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
"It is strongly recommended that column names contain only "
"upper and lower-case ASCII letters, digits, or underscores "
"for maximum compatibility with other software "
"(got {!r}).".format(name),
VerifyWarning,
)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if not isinstance(name, str) or len(str(Card("TTYPE", name))) != CARD_LENGTH:
raise AssertionError(
"Column name must be a string able to fit in a single "
"FITS card--typically this means a maximum of 68 "
"characters, though it may be fewer if the string "
"contains special characters like quotes."
)
@ColumnAttribute("TCTYP")
def coord_type(col, coord_type):
if coord_type is None:
return
if not isinstance(coord_type, str) or len(coord_type) > 8:
raise AssertionError(
"Coordinate/axis type must be a string of atmost 8 characters."
)
@ColumnAttribute("TCUNI")
def coord_unit(col, coord_unit):
if coord_unit is not None and not isinstance(coord_unit, str):
raise AssertionError("Coordinate/axis unit must be a string.")
@ColumnAttribute("TCRPX")
def coord_ref_point(col, coord_ref_point):
if coord_ref_point is not None and not isinstance(
coord_ref_point, numbers.Real
):
raise AssertionError(
"Pixel coordinate of the reference point must be real floating type."
)
@ColumnAttribute("TCRVL")
def coord_ref_value(col, coord_ref_value):
if coord_ref_value is not None and not isinstance(
coord_ref_value, numbers.Real
):
raise AssertionError(
"Coordinate value at reference point must be real floating type."
)
@ColumnAttribute("TCDLT")
def coord_inc(col, coord_inc):
if coord_inc is not None and not isinstance(coord_inc, numbers.Real):
raise AssertionError("Coordinate increment must be real floating type.")
@ColumnAttribute("TRPOS")
def time_ref_pos(col, time_ref_pos):
if time_ref_pos is not None and not isinstance(time_ref_pos, str):
raise AssertionError("Time reference position must be a string.")
format = ColumnAttribute("TFORM")
unit = ColumnAttribute("TUNIT")
null = ColumnAttribute("TNULL")
bscale = ColumnAttribute("TSCAL")
bzero = ColumnAttribute("TZERO")
disp = ColumnAttribute("TDISP")
start = ColumnAttribute("TBCOL")
dim = ColumnAttribute("TDIM")
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format="I") # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f"Illegal format `{format}`.")
return format, recformat
@classmethod
def _verify_keywords(
cls,
name=None,
format=None,
unit=None,
null=None,
bscale=None,
bzero=None,
disp=None,
start=None,
dim=None,
ascii=None,
coord_type=None,
coord_unit=None,
coord_ref_point=None,
coord_ref_value=None,
coord_inc=None,
time_ref_pos=None,
):
"""
Given the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f"Column format option (TFORMn) failed verification: {err!s} "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
except AttributeError as err:
msg = (
"Column format option (TFORMn) must be a string with a valid "
f"FITS table format (got {format!s}: {err!s}). "
"The invalid value will be ignored for the purpose of "
"formatting the data in this column."
)
invalid["format"] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [
("name", name),
("unit", unit),
("bscale", bscale),
("bzero", bzero),
]:
if v is not None and v != "":
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != "":
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null)
)
else:
tnull_formats = ("B", "I", "J", "K")
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
"Column null option (TNULLn) must be an integer for "
"binary table columns (got {!r}). The invalid value "
"will be ignored for the purpose of formatting "
"the data in this column.".format(null)
)
elif not (
format.format in tnull_formats
or (
format.format in ("P", "Q") and format.p_format in tnull_formats
)
):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
"Column null option (TNULLn) is invalid for binary "
"table columns of type {!r} (got {!r}). The invalid "
"value will be ignored for the purpose of formatting "
"the data in this column.".format(format, null)
)
if msg is None:
valid["null"] = null
else:
invalid["null"] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != "":
msg = None
if not isinstance(disp, str):
msg = (
"Column disp option (TDISPn) must be a string (got "
f"{disp!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
elif isinstance(format, _AsciiColumnFormat) and disp[0].upper() == "L":
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column."
)
if msg is None:
try:
_parse_tdisp_format(disp)
valid["disp"] = disp
except VerifyError as err:
msg = (
"Column disp option (TDISPn) failed verification: "
f"{err!s} The invalid value will be ignored for the "
"purpose of formatting the data in this column."
)
invalid["disp"] = (disp, msg)
else:
invalid["disp"] = (disp, msg)
# Validate the start option
if start is not None and start != "":
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
"Column start option (TBCOLn) is not allowed for binary "
"table columns (got {!r}). The invalid keyword will be "
"ignored for the purpose of formatting the data in this "
"column.".format(start)
)
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
"Column start option (TBCOLn) must be a positive integer "
"(got {!r}). The invalid value will be ignored for the "
"purpose of formatting the data in this column.".format(start)
)
if msg is None:
valid["start"] = start
else:
invalid["start"] = (start, msg)
# Process TDIMn options
        # ASCII table columns can't have a TDIMn keyword associated with them;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != "":
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
"Column dim option (TDIMn) is not allowed for ASCII table "
"columns (got {!r}). The invalid keyword will be ignored "
"for the purpose of formatting this column.".format(dim)
)
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column."
)
if dims_tuple:
if isinstance(recformat, _FormatP):
# TDIMs have different meaning for VLA format,
# no warning should be thrown
msg = None
elif reduce(operator.mul, dims_tuple) > format.repeat:
msg = (
"The repeat count of the column format {!r} for column {!r} "
"is fewer than the number of elements per the TDIM "
"argument {!r}. The invalid TDIMn value will be ignored "
"for the purpose of formatting this column.".format(
                            format, name, dim
)
)
if msg is None:
valid["dim"] = dims_tuple
else:
invalid["dim"] = (dim, msg)
if coord_type is not None and coord_type != "":
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type)
)
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type)
)
if msg is None:
valid["coord_type"] = coord_type
else:
invalid["coord_type"] = (coord_type, msg)
if coord_unit is not None and coord_unit != "":
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit)
)
if msg is None:
valid["coord_unit"] = coord_unit
else:
invalid["coord_unit"] = (coord_unit, msg)
for k, v in [
("coord_ref_point", coord_ref_point),
("coord_ref_value", coord_ref_value),
("coord_inc", coord_inc),
]:
if v is not None and v != "":
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got"
" {!r}). The invalid value will be ignored for the purpose of"
" formatting the data in this column.".format(
k, ATTRIBUTE_TO_KEYWORD[k], v
)
)
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != "":
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos)
)
if msg is None:
valid["time_ref_pos"] = time_ref_pos
else:
invalid["time_ref_pos"] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
        ASCII table (ascii=None means unspecified, but leaning toward a binary
        table where ambiguous), create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format, _AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
"Columns cannot have both a start (TCOLn) and dim "
"(TDIMn) option, since the former is only applies to "
"ASCII tables, and the latter is only valid for binary tables."
)
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
# For whatever reason our guess was wrong (for example if we got
            # just 'F' that's not a valid binary format, but it is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (
_AsciiColumnFormat if guess_format is _ColumnFormat else _ColumnFormat
)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims and format.format not in "PQ":
shape = dims[:-1] if "A" in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if "P" in format or "Q" in format:
return array
elif "A" in format:
if array.dtype.char in "SU":
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif "L" in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype("bool"):
return np.where(array == np.False_, ord("F"), ord("T"))
else:
return np.where(array == 0, ord("F"), ord("T"))
elif "X" in format:
return _convert_array(array, np.dtype("uint8"))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {
2: np.uint16(2**15),
4: np.uint32(2**31),
8: np.uint64(2**63),
}
if (
array.dtype.kind == "u"
and array.dtype.itemsize in bzeros
and self.bscale in (1, None, "")
and self.bzero == bzeros[array.dtype.itemsize]
):
# Basically the array is uint, has scale == 1.0, and the
# bzero is the appropriate value for a pseudo-unsigned
                    # integer of the input dtype, so go ahead and treat the
                    # column as pseudo-unsigned
numpy_format = numpy_format.replace("i", "u")
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
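# NOTE: hypothetical, editor-added sketch; not part of the astropy.io.fits API and
# never called. It shows a minimal Column construction along the lines of the
# __init__ docstring above; the column name 'counts' is invented for illustration.
def _example_column_creation():
    col = Column(name="counts", format="J", array=np.array([1, 2, 3]))
    assert col.name == "counts"
    assert str(col.format) == "J"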
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = "\x00"
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if hasattr(input, "_columns_type") and issubclass(input._columns_type, ColDefs):
klass = input._columns_type
elif hasattr(input, "_col_format_cls") and issubclass(
input._col_format_cls, _AsciiColumnFormat
):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .fitsrec import FITS_rec
from .hdu.table import _TableBaseHDU
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (
isinstance(input, FITS_rec)
and hasattr(input, "_coldefs")
and input._coldefs
):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError(
"Input to ColDefs must be a table HDU, a list "
"of Columns, or a record/field array."
)
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f"Element {idx} in the ColDefs input is not a Column.")
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
if ftype.kind == "O":
dtypes = {np.array(array[cname][i]).dtype for i in range(len(array))}
if (len(dtypes) > 1) or (np.dtype("O") in dtypes):
raise TypeError(
f"Column '{cname}' contains unsupported object types or "
f"mixed types: {dtypes}"
)
ftype = dtypes.pop()
format = self._col_format_cls.from_recformat(ftype)
format = f"P{format}()"
else:
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or "A" in format):
if "A" in format:
# should take into account multidimensional items in the column
dimel = int(re.findall("[0-9]+", str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = "(" + ",".join(str(d) for d in dim) + ")"
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == "u":
if "I" in format:
bzero = np.uint16(2**15)
elif "J" in format:
bzero = np.uint32(2**31)
elif "K" in format:
bzero = np.uint64(2**63)
c = Column(
name=cname,
format=format,
array=array.view(np.ndarray)[cname],
bzero=bzero,
dim=dim,
)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr["TFIELDS"]
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group("label")
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group("num"))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == "format":
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f"Invalid keyword for column {idx + 1}: {val[1]}", VerifyWarning
)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs["recformat"]
if "dim" in valid_kwargs:
valid_kwargs["dim"] = kwargs["dim"]
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]["array"] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener to changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if new_column.disp is not None and new_column.disp.upper().startswith("L"):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == "s":
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else "")
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim and format_.format not in "PQ":
# Note: VLA array descriptors should not be reshaped
# as they are always of shape (2,)
if format_.format == "A":
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({"names": self.names, "formats": formats, "offsets": offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = "ColDefs("
if hasattr(self, "columns") and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += "\n "
rep += "\n ".join([repr(c) for c in self.columns])
rep += "\n"
rep += ")"
return rep
def __add__(self, other, option="left"):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError("Wrong type of input.")
if option == "left":
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, "right")
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value, new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == "name":
del self.names
elif attr == "format":
del self.formats
self._notify(
"column_attribute_changed", column, idx, attr, old_value, new_value
)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify("column_added", self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
        Parameters
        ----------
        col_name : str or int
            The column's name or index.
"""
# Ask the HDU object to load the data before we modify our columns
self._notify("load_data")
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify("column_removed", self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f"New name {new_name} already exists.")
else:
self.change_attrib(col_name, "name", new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, "unit", new_unit)
def info(self, attrib="all", output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ["all", ""]:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(",")
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == "s":
                    lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write(
f"'{attr}' is not an attribute of the column definitions.\n"
)
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + "s")
if not output:
return ret
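# Example (illustrative sketch): ``info`` forgives plurals and blanks, so for a
# hypothetical ColDefs instance ``coldefs`` the following are equivalent ways to
# inspect names and formats; passing ``output=False`` returns a dict instead of
# printing.
#     >>> coldefs.info('names')
#     >>> coldefs.info('name, format', output=False)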
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = " "
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = "S" + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ["a" + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype="a"):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == "a":
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(f"Inconsistent input data array: {input}")
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a, dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
        Ensure the new item has a consistent data type, to avoid
        misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == "a":
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
nelem = value.shape
len_value = np.prod(nelem)
self.max = max(self.max, len_value)
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
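# Example (illustrative sketch): exact matches win, otherwise the lookup falls
# back to a case-insensitive match; missing or ambiguous names raise KeyError.
#     >>> _get_index(['TIME', 'rate'], 1)
#     1
#     >>> _get_index(['TIME', 'rate'], 'time')
#     0
#     >>> _get_index(['TIME', 'rate'], 'flux')   # raises KeyError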
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
input ``Uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype="uint8")
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
Wrap the X format column Boolean array into an ``UInt8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
output ``Uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
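# Example (illustrative sketch): round-tripping a single row of an 8-bit X
# column; the packed byte value shown is what the bit arithmetic above is
# expected to produce.
#     >>> bits = np.array([[1, 0, 1, 1, 0, 0, 1, 0]], dtype=bool)
#     >>> packed = np.zeros((1, 1), dtype='uint8')
#     >>> _wrapx(bits, packed, 8)         # packed becomes [[178]]
#     >>> unpacked = np.zeros((1, 8), dtype=bool)
#     >>> _unwrapx(packed, unpacked, 8)   # recovers the original bits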
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == "a":
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == "a":
rowval = " " * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == "a":
data_output[idx] = chararray.array(encode_ascii(rowval), itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
nelem = data_output[idx].shape
descr_output[idx, 0] = np.prod(nelem)
descr_output[idx, 1] = _offset
_offset += descr_output[idx, 0] * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
        # TODO: Maybe catch this error and use a default type (bytes, maybe?) for
# unrecognized column types. As long as we can determine the correct
# byte width somehow..
raise VerifyError(f"Format {tform!r} is not recognized.")
if repeat == "":
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
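# Example (illustrative sketch, assuming the TFORMAT_RE defined earlier in this
# module): a repeat count of 10, format code 'A', and an empty option string.
#     >>> _parse_tformat('10A')
#     (10, 'A', '')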
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f"Format {tform!r} is not recognized.")
# Be flexible on case
format = match.group("format")
if format is None:
# Floating point format
format = match.group("formatf").upper()
width = match.group("widthf")
precision = match.group("precision")
if width is None or precision is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group("width")
if width is None:
if strict:
raise VerifyError(
"Format {!r} is not unambiguously an ASCII table format."
)
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = (
"Format {!r} is not valid--field width and decimal precision "
"must be integers."
)
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError(
f"Format {tform!r} not valid--field width must be a positive integeter."
)
if precision >= width:
raise VerifyError(
f"Format {tform!r} not valid--the number of decimal digits "
f"must be less than the format's total width {width}."
)
return format, width, precision
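# Example (illustrative sketch): a floating-point ASCII format with an explicit
# width and precision.
#     >>> _parse_ascii_tformat('F8.3')
#     ('F', 8, 3)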
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
    the ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group("dims")
return tuple(int(d.strip()) for d in dims.split(","))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
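# Example (illustrative sketch): note that the parsed dimensions are reversed
# into row-major (C) order, and anything unparseable yields an empty tuple.
#     >>> _parse_tdim('(100,200)')
#     (200, 100)
#     >>> _parse_tdim(None)
#     ()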
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == "a" and f2[0] == "a":
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == "A":
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == "A" and option != "":
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ""
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == "X":
output_format = _FormatX(repeat)
elif dtype == "P":
output_format = _FormatP.from_tform(format)
elif dtype == "Q":
output_format = _FormatQ.from_tform(format)
elif dtype == "F":
output_format = "f8"
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == "U" or (
dtype.subdtype is not None and dtype.subdtype[0].char == "U"
):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats.
        # (dtype.base and dtype.subdtype, rather than dtype, handle
        # multi-dimensional items.)
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype="i8").prod()
if nel > 1:
repeat = nel
if kind == "a":
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + "A"
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ""
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f"Illegal format `{format}`.")
return output_format
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ("U", "S"):
recformat = kind = "a"
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
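# Example (illustrative sketch):
#     >>> _dtype_to_recformat('float32')
#     ('f4', 'f', dtype('float32'))
#     >>> _dtype_to_recformat('U10')[:2]    # string kinds collapse to 'a'
#     ('a', 'a')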
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == "a":
return "A" + str(itemsize)
elif NUMPY2FITS.get(recformat) == "L":
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return "A1"
elif kind == "i":
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS["I"][0])
return "I" + str(width)
elif kind == "f":
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = "D"
else:
format = "E"
width = ".".join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
# For integers, if the width <= 4 we can safely use 16-bit ints for all
        # values; if width >= 10 we may need to accommodate 64-bit int
        # values [for the non-standard J format code just always force 64-bit]
if format == "I":
if width <= 4:
recformat = "i2"
elif width > 9:
recformat = "i8"
elif format == "A":
recformat += str(width)
return recformat
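# Example (illustrative sketch; the numpy codes for narrow columns come from the
# ASCII2NUMPY table defined earlier in this module):
#     >>> _convert_ascii_format('I10')    # wide integer column -> 64-bit
#     'i8'
#     >>> _convert_ascii_format('A1')
#     'a1'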
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
    which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
See the format_type definitions above the TDISP_FMT_DICT. If codes is
changed to take advantage of the exponential specification, will need to
add it as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f"Format {format_type} is not recognized.")
def python_to_tdisp(format_string, logical_dtype=False):
"""
Turn the Python format string to a TDISP FITS compliant format string. Not
    all formats convert; these will cause a Warning and return None.
Parameters
----------
format_string : str
TDISPn FITS Header keyword. Used to specify display formatting.
logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
    tdisp_string: str
        The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (ASCII_DEFAULT_WIDTHS[key][0] - ASCII_DEFAULT_WIDTHS[key][1])
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
f"Format {format_string} cannot be mapped to the accepted TDISPn "
"keyword values. Format will not be moved into TDISPn keyword.",
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision
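# Example (illustrative sketch):
#     >>> python_to_tdisp('{:6.2f}')
#     'F6.2'
#     >>> python_to_tdisp('%i')    # cannot be mapped; warns and returns None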
|
d7cc40f8a3879b311bce63974188f38fcb84ec0bf9a3902259626d2f581d80a6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing QDP tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import copy
import re
import warnings
from collections.abc import Iterable
import numpy as np
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
def _line_type(line, delimiter=None):
"""Interpret a QDP file line.
Parameters
----------
line : str
a single line of the file
Returns
-------
type : str
Line type: "comment", "command", or "data"
Examples
--------
>>> _line_type("READ SERR 3")
'command'
>>> _line_type(" \\n !some gibberish")
'comment'
>>> _line_type(" ")
'comment'
>>> _line_type(" 21345.45")
'data,1'
>>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
'data,6'
>>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
'data,6'
>>> _line_type(" 21345.45 ! a comment to disturb")
'data,1'
>>> _line_type("NO NO NO NO NO")
'new'
>>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
'new'
>>> _line_type("N O N NOON OON O")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
>>> _line_type(" some non-comment gibberish")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
"""
_decimal_re = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?"
_command_re = r"READ [TS]ERR(\s+[0-9]+)+"
sep = delimiter
if delimiter is None:
sep = r"\s+"
_new_re = rf"NO({sep}NO)+"
_data_re = rf"({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)"
_type_re = rf"^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$"
_line_type_re = re.compile(_type_re, re.IGNORECASE)
line = line.strip()
if not line:
return "comment"
match = _line_type_re.match(line)
if match is None:
raise ValueError(f"Unrecognized QDP line: {line}")
for type_, val in match.groupdict().items():
if val is None:
continue
if type_ == "data":
return f"data,{len(val.split(sep=delimiter))}"
else:
return type_
def _get_type_from_list_of_lines(lines, delimiter=None):
"""Read through the list of QDP file lines and label each line by type.
Parameters
----------
lines : list
List containing one file line in each entry
Returns
-------
contents : list
        List containing the type for each line (see `_line_type`)
ncol : int
The number of columns in the data lines. Must be the same throughout
the file
Examples
--------
>>> line0 = "! A comment"
>>> line1 = "543 12 456.0"
>>> lines = [line0, line1]
>>> types, ncol = _get_type_from_list_of_lines(lines)
>>> types[0]
'comment'
>>> types[1]
'data,3'
>>> ncol
3
>>> lines.append("23")
>>> _get_type_from_list_of_lines(lines)
Traceback (most recent call last):
...
ValueError: Inconsistent number of columns
"""
types = [_line_type(line, delimiter=delimiter) for line in lines]
current_ncol = None
for type_ in types:
if type_.startswith("data,"):
ncol = int(type_[5:])
if current_ncol is None:
current_ncol = ncol
elif ncol != current_ncol:
raise ValueError("Inconsistent number of columns")
return types, current_ncol
def _get_lines_from_file(qdp_file):
if "\n" in qdp_file:
lines = qdp_file.split("\n")
elif isinstance(qdp_file, str):
with open(qdp_file) as fobj:
lines = [line.strip() for line in fobj.readlines()]
elif isinstance(qdp_file, Iterable):
lines = qdp_file
else:
raise ValueError("invalid value of qdb_file")
return lines
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands.
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None):
"""Get all tables from a QDP file.
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
input_colnames : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
delimiter : str
Delimiter for the values in the table.
Returns
-------
list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
lines = _get_lines_from_file(qdp_file)
contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter)
table_list = []
err_specs = {}
colnames = None
comment_text = ""
initial_comments = ""
command_lines = ""
current_rows = None
for line, datatype in zip(lines, contents):
line = line.strip().lstrip("!")
# Is this a comment?
if datatype == "comment":
comment_text += line + "\n"
continue
if datatype == "command":
# The first time I find commands, I save whatever comments into
            # the initial comments.
if command_lines == "":
initial_comments = comment_text
comment_text = ""
if err_specs != {}:
warnings.warn(
"This file contains multiple command blocks. Please verify",
AstropyUserWarning,
)
command_lines += line + "\n"
continue
if datatype.startswith("data"):
# The first time I find data, I define err_specs
if err_specs == {} and command_lines != "":
for cline in command_lines.strip().split("\n"):
command = cline.strip().split()
# This should never happen, but just in case.
if len(command) < 3:
continue
err_specs[command[1].lower()] = [int(c) for c in command[2:]]
if colnames is None:
colnames = _interpret_err_lines(err_specs, ncol, names=input_colnames)
if current_rows is None:
current_rows = []
values = []
for v in line.split(delimiter):
if v.upper() == "NO":
values.append(np.ma.masked)
else:
# Understand if number is int or float
try:
values.append(int(v))
except ValueError:
values.append(float(v))
current_rows.append(values)
continue
if datatype == "new":
# Save table to table_list and reset
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split(
"\n"
)
new_table.meta["comments"] = comment_text.strip().split("\n")
# Reset comments
comment_text = ""
table_list.append(new_table)
current_rows = None
continue
# At the very end, if there is still a table being written, let's save
# it to the table_list
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
new_table.meta["comments"] = comment_text.strip().split("\n")
table_list.append(new_table)
return table_list
def _understand_err_col(colnames):
"""Get which column names are error columns.
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith("_nerr"):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith("_perr"):
raise ValueError("Missing positive error")
return serr, terr
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file.
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
        when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
tables : list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
if table_id is None:
warnings.warn(
"table_id not specified. Reading the first available table",
AstropyUserWarning,
)
table_id = 0
tables = _get_tables_from_qdp_file(
qdp_file, input_colnames=names, delimiter=delimiter
)
return tables[table_id]
def _write_table_qdp(table, filename=None, err_specs=None):
"""Write a table to a QDP file.
Parameters
----------
table : :class:`~astropy.table.Table`
Input table to be written
filename : str
Output QDP file name
Other Parameters
----------------
err_specs : dict
Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying
which columns have symmetric and two-sided errors (see QDP format
specification)
"""
import io
fobj = io.StringIO()
if "initial_comments" in table.meta and table.meta["initial_comments"] != []:
for line in table.meta["initial_comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
if err_specs is None:
serr_cols, terr_cols = _understand_err_col(table.colnames)
else:
        # Work on a copy so that the ``pop`` calls below do not empty the
        # caller's dictionary
        err_specs = copy.deepcopy(err_specs)
        serr_cols = err_specs.pop("serr", [])
        terr_cols = err_specs.pop("terr", [])
if serr_cols != []:
col_string = " ".join([str(val) for val in serr_cols])
print(f"READ SERR {col_string}", file=fobj)
if terr_cols != []:
col_string = " ".join([str(val) for val in terr_cols])
print(f"READ TERR {col_string}", file=fobj)
if "comments" in table.meta and table.meta["comments"] != []:
for line in table.meta["comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
colnames = table.colnames
print("!" + " ".join(colnames), file=fobj)
for row in table:
values = []
for val in row:
if not np.ma.is_masked(val):
rep = str(val)
else:
rep = "NO"
values.append(rep)
print(" ".join(values), file=fobj)
full_string = fobj.getvalue()
fobj.close()
if filename is not None:
with open(filename, "w") as fobj:
print(full_string, file=fobj)
return full_string.split("\n")
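# Example (illustrative sketch): writing a table whose first column has
# two-sided errors and whose second column has a symmetric error; the column
# names used here are hypothetical.
#     >>> t = Table({'a': [1.0], 'a_perr': [0.1], 'a_nerr': [-0.1],
#     ...            'b': [2.0], 'b_err': [0.2]})
#     >>> _write_table_qdp(t, err_specs={'terr': [1], 'serr': [2]})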
class QDPSplitter(core.DefaultSplitter):
"""
Split on space for QDP tables.
"""
delimiter = " "
class QDPHeader(basic.CommentedHeaderHeader):
"""
    Header that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`.
"""
splitter_class = QDPSplitter
comment = "!"
write_comment = "!"
class QDPData(basic.BasicData):
"""
    Data that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`.
"""
splitter_class = QDPSplitter
fill_values = [(core.masked, "NO")]
comment = "!"
write_comment = None
class QDP(basic.Basic):
"""Quick and Dandy Plot table.
Example::
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b be c d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b be c d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
The input table above contains some initial comments, the error commands,
then two tables.
This file format can contain multiple tables, separated by a line full
of ``NO``s. Comments are exclamation marks, and missing values are single
``NO`` entries. The delimiter is usually whitespace, more rarely a comma.
The QDP format differentiates between data and error columns. The table
above has commands::
READ TERR 1
READ SERR 3
which mean that after data column 1 there will be two error columns
    containing its positive and negative error bars, then data column 2 without
error bars, then column 3, then a column with the symmetric error of column
3, then the remaining data columns.
As explained below, table headers are highly inconsistent. Possible
comments containing column names will be ignored and columns will be called
``col1``, ``col2``, etc. unless the user specifies their names with the
    ``names=`` keyword argument.
When passing column names, pass **only the names of the data columns, not
the error columns.**
Error information will be encoded in the names of the table columns.
(e.g. ``a_perr`` and ``a_nerr`` for the positive and negative error of
column ``a``, ``b_err`` the symmetric error of column ``b``.)
When writing tables to this format, users can pass an ``err_specs`` keyword
passing a dictionary ``{'serr': [3], 'terr': [1, 2]}``, meaning that data
    columns 1 and 2 will have two additional columns each with their positive
and negative errors, and data column 3 will have an additional column with
a symmetric error (just like the ``READ SERR`` and ``READ TERR`` commands
above)
Headers are just comments, and tables distributed by various missions
can differ greatly in their use of conventions. For example, light curves
distributed by the Swift-Gehrels mission have an extra space in one header
entry that makes the number of labels inconsistent with the number of cols.
For this reason, we ignore the comments that might encode the column names
and leave the name specification to the user.
Example::
> Extra space
> |
> v
>! MJD Err (pos) Err(neg) Rate Error
>53000.123456 2.378e-05 -2.378472e-05 NO 0.212439
These readers and writer classes will strive to understand which of the
comments belong to all the tables, and which ones to each single table.
General comments will be stored in the ``initial_comments`` meta of each
table. The comments of each table will be stored in the ``comments`` meta.
Example::
t = Table.read(example_qdp, format='ascii.qdp', table_id=1, names=['a', 'b', 'c', 'd'])
reads the second table (``table_id=1``) in file ``example.qdp`` containing
the table above. There are four column names but seven data columns, why?
Because the ``READ SERR`` and ``READ TERR`` commands say that there are
three error columns.
``t.meta['initial_comments']`` will contain the initial two comment lines
in the file, while ``t.meta['comments']`` will contain ``Table 1 comment``
The table can be written to another file, preserving the same information,
as::
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
Note how the ``terr`` and ``serr`` commands are passed to the writer.
"""
_format_name = "qdp"
_io_registry_can_write = True
_io_registry_suffix = ".qdp"
_description = "Quick and Dandy Plotter"
header_class = QDPHeader
data_class = QDPData
def __init__(self, table_id=None, names=None, err_specs=None, sep=None):
super().__init__()
self.table_id = table_id
self.names = names
self.err_specs = err_specs
self.delimiter = sep
def read(self, table):
self.lines = self.inputter.get_lines(table, newline="\n")
return _read_table_qdp(
self.lines,
table_id=self.table_id,
names=self.names,
delimiter=self.delimiter,
)
def write(self, table):
self._check_multidim_table(table)
lines = _write_table_qdp(table, err_specs=self.err_specs)
return lines
|
9fce3956afbe5f4fbf68733b31a7865ccbef510a5b493e9ec5a5fa59c3078534 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing Parquet
tables that are not meant to be used directly, but instead are
available as readers/writers in `astropy.table`. See
:ref:`astropy:table_io` for more details.
"""
import os
import warnings
import numpy as np
from astropy.utils import minversion
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
PARQUET_SIGNATURE = b"PAR1"
__all__ = [] # nothing is publicly scoped
def parquet_identify(origin, filepath, fileobj, *args, **kwargs):
"""Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise.
"""
if fileobj is not None:
try: # safely test if pyarrow file
pos = fileobj.tell() # store current stream position
except AttributeError:
return False
signature = fileobj.read(4) # read first 4 bytes
fileobj.seek(pos) # return to original location
return signature == PARQUET_SIGNATURE
elif filepath is not None:
return filepath.endswith((".parquet", ".parq"))
else:
return False
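# Example (illustrative sketch): with no open file object the decision is made
# purely on the filename suffix.
#     >>> parquet_identify('read', 'catalog.parquet', None)
#     True
#     >>> parquet_identify('read', 'catalog.fits', None)
#     False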
def read_table_parquet(
input, include_names=None, exclude_names=None, schema_only=False, filters=None
):
"""
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
forming a more selective and multiple column predicate. Finally, the most
outer list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
For example:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True.
"""
pa, parquet, _ = get_pyarrow()
if not isinstance(input, (str, os.PathLike)):
# The 'read' attribute is the key component of a generic
# file-like object.
if not hasattr(input, "read"):
raise TypeError("pyarrow can only open path-like or file-like objects.")
schema = parquet.read_schema(input)
# Pyarrow stores all metadata as byte-strings, so we convert
# to UTF-8 strings here.
if schema.metadata is not None:
md = {k.decode("UTF-8"): v.decode("UTF-8") for k, v in schema.metadata.items()}
else:
md = {}
from astropy.table import Column, Table, meta, serialize
# parse metadata from table yaml
meta_dict = {}
if "table_meta_yaml" in md:
meta_yaml = md.pop("table_meta_yaml").split("\n")
meta_hdr = meta.get_header_from_yaml(meta_yaml)
if "meta" in meta_hdr:
meta_dict = meta_hdr["meta"]
else:
meta_hdr = None
# parse and set serialized columns
full_table_columns = {name: name for name in schema.names}
has_serialized_columns = False
if "__serialized_columns__" in meta_dict:
has_serialized_columns = True
serialized_columns = meta_dict["__serialized_columns__"]
for scol in serialized_columns:
for name in _get_names(serialized_columns[scol]):
full_table_columns[name] = scol
use_names = set(full_table_columns.values())
# Apply include_names before exclude_names
if include_names is not None:
use_names.intersection_update(include_names)
if exclude_names is not None:
use_names.difference_update(exclude_names)
# Preserve column ordering via list, and use this dict trick
# to remove duplicates and preserve ordering (for mixin columns)
use_names = list(
dict.fromkeys([x for x in full_table_columns.values() if x in use_names])
)
# names_to_read is a list of actual serialized column names, where
# e.g. the requested name 'time' becomes ['time.jd1', 'time.jd2']
names_to_read = []
for name in use_names:
names = [n for n, col in full_table_columns.items() if name == col]
names_to_read.extend(names)
if not names_to_read:
raise ValueError("No include_names specified were found in the table.")
# We need to pop any unread serialized columns out of the meta_dict.
if has_serialized_columns:
for scol in list(meta_dict["__serialized_columns__"].keys()):
if scol not in use_names:
meta_dict["__serialized_columns__"].pop(scol)
# whether to return the whole table or a formatted empty table.
if not schema_only:
# Read the pyarrow table, specifying columns and filters.
pa_table = parquet.read_table(input, columns=names_to_read, filters=filters)
num_rows = pa_table.num_rows
else:
num_rows = 0
# Determine numpy/astropy types of columns from the arrow table.
dtype = []
for name in names_to_read:
t = schema.field(name).type
shape = None
if isinstance(t, pa.FixedSizeListType):
# The FixedSizeListType has an arrow value_type and a size.
value_type = t.value_type
shape = (t.list_size,)
elif isinstance(t, pa.ListType):
# The ListType (variable length arrays) has a value type.
value_type = t.value_type
else:
# All other arrow column types are the value_type.
value_type = t
if value_type not in (pa.string(), pa.binary()):
# Convert the pyarrow value type into a numpy dtype (which is returned
# by the to_pandas_type() method).
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(value_type.to_pandas_dtype())
else:
dtype.append((value_type.to_pandas_dtype(), shape))
continue
# Special-case for string and binary columns
md_name = f"table::len::{name}"
if md_name in md:
# String/bytes length from header.
strlen = int(md[md_name])
            elif schema_only:
                # Choose an arbitrary string length since we
                # are not reading in the table.
strlen = 10
warnings.warn(
f"No {md_name} found in metadata. Guessing {{strlen}} for schema.",
AstropyUserWarning,
)
else:
strlen = max(len(row.as_py()) for row in pa_table[name])
warnings.warn(
f"No {md_name} found in metadata. Using longest string"
f" ({strlen} characters).",
AstropyUserWarning,
)
strname = f"U{strlen}" if value_type == pa.string() else f"|S{strlen}"
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(strname)
else:
dtype.append((strname, shape))
if schema_only:
# If we only need the schema, create an empty table with the correct dtype.
data = np.zeros(0, dtype=list(zip(names_to_read, dtype)))
table = Table(data=data, meta=meta_dict)
else:
# If we need the full table, create the table and add the columns
# one at a time. This minimizes data copying.
table = Table(meta=meta_dict)
for name, dt in zip(names_to_read, dtype):
# First convert the arrow column to a numpy array.
col = pa_table[name].to_numpy()
t = schema.field(name).type
if t in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the correct type.
col = col.astype(dt)
elif isinstance(t, pa.FixedSizeListType):
# If it is a FixedSizeListType (array column) then it needs to
# be broken into a 2D array, but only if the table has a non-zero
# length.
if len(col) > 0:
col = np.stack(col)
if t.value_type in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the
# correct type.
# The conversion dtype is only the first element
# in the dtype tuple.
col = col.astype(dt[0])
else:
# This is an empty column, and needs to be created with the
# correct type.
col = np.zeros(0, dtype=dt)
elif isinstance(t, pa.ListType):
# If we have a variable length string/binary column,
# we need to convert each row to the proper type.
if t.value_type in (pa.string(), pa.binary()):
col = np.array([row.astype(dt) for row in col], dtype=np.object_)
table.add_column(Column(name=name, data=col))
if meta_hdr is not None:
# Set description, format, unit, meta from the column
# metadata that was serialized with the table.
header_cols = {x["name"]: x for x in meta_hdr["datatype"]}
for col in table.columns.values():
for attr in ("description", "format", "unit", "meta"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Convert all compound columns to astropy objects
# (e.g. time.jd1, time.jd2 into a single time column)
table = serialize._construct_mixins_from_columns(table)
return table
def write_table_parquet(table, output, overwrite=False):
"""
Write a Table object to a Parquet file.
The parquet writer supports tables with regular columns, fixed-size array
columns, and variable-length array columns (provided all arrays have the
same type).
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
Notes
-----
Tables written with array columns (fixed-size or variable-length) cannot
be read with pandas.
Raises
------
ValueError
If one of the columns has a mixed-type variable-length array, or
if it is a zero-length table and any of the columns are variable-length
arrays.
"""
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
pa, parquet, writer_version = get_pyarrow()
if not isinstance(output, (str, os.PathLike)):
raise TypeError(f"`output` should be a string or path-like, not {output}")
# Convert all compound columns into serialized column names, where
# e.g. 'time' becomes ['time.jd1', 'time.jd2'].
with serialize_context_as("parquet"):
encode_table = serialize.represent_mixins_as_columns(table)
# We store the encoded serialization metadata as a yaml string.
meta_yaml = meta.get_yaml_from_table(encode_table)
meta_yaml_str = "\n".join(meta_yaml)
# Build the pyarrow schema by converting from the numpy dtype of each
# column to an equivalent pyarrow type with from_numpy_dtype()
type_list = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
if dt.type == np.object_:
# If the column type is np.object_, then it should be a column
# of variable-length arrays. This can be serialized with parquet
# provided all of the elements have the same data-type.
# Additionally, if the table has no elements, we cannot deduce
# the datatype, and hence cannot serialize the table.
if len(encode_table) > 0:
obj_dtype = encode_table[name][0].dtype
# Check that the variable-length array all has the same type.
for row in encode_table[name]:
if row.dtype != obj_dtype:
raise ValueError(
f"Cannot serialize mixed-type column ({name}) with parquet."
)
# Calling pa.list_() creates a ListType which is an array of variable-
# length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(obj_dtype.type),
)
else:
raise ValueError(
"Cannot serialize zero-length table "
f"with object column ({name}) with parquet."
)
elif len(dt.shape) > 0:
# This column has a shape, and is an array type column. Calling
# pa.list_() with a list_size creates a FixedSizeListType, which
# is an array of fixed-length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(dt.subdtype[0].type),
list_size=np.prod(dt.shape),
)
else:
# This is a standard column.
arrow_type = pa.from_numpy_dtype(dt.type)
type_list.append((name, arrow_type))
metadata = {}
for name, col in encode_table.columns.items():
# Parquet will retain the datatypes of columns, but string and
# byte column length is lost. Therefore, we special-case these
# types to record the length for precise round-tripping.
t = col.dtype.type
itemsize = col.dtype.itemsize
if t is np.object_:
t = encode_table[name][0].dtype.type
if t == np.str_ or t == np.bytes_:
# We need to scan through all of them.
itemsize = -1
for row in encode_table[name]:
itemsize = max(itemsize, row.dtype.itemsize)
if t is np.str_:
metadata[f"table::len::{name}"] = str(itemsize // 4)
elif t is np.bytes_:
metadata[f"table::len::{name}"] = str(itemsize)
metadata["table_meta_yaml"] = meta_yaml_str
# Pyarrow stores all metadata as byte strings, so we explicitly encode
# our unicode strings in metadata as UTF-8 byte strings here.
metadata_encode = {
k.encode("UTF-8"): v.encode("UTF-8") for k, v in metadata.items()
}
schema = pa.schema(type_list, metadata=metadata_encode)
if os.path.exists(output):
if overwrite:
# We must remove the file prior to writing below.
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
    # We use the format version returned by get_pyarrow() for full support of
    # datatypes including uint32.
with parquet.ParquetWriter(output, schema, version=writer_version) as writer:
# Convert each Table column to a pyarrow array
arrays = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
# Parquet must be stored little-endian. When we use astype(..., copy=False)
# we get a very fast conversion when the dtype is unchanged, and only
# incur a cost when we need to do a byte-swap operation.
dt_new = dt.newbyteorder("<")
if dt.type == np.object_:
# Turn the column into a list of numpy arrays.
val = [row.astype(dt_new, copy=False) for row in encode_table[name]]
elif len(dt.shape) > 0:
if len(encode_table) > 0:
val = np.split(
encode_table[name].ravel().astype(dt_new.base, copy=False),
len(encode_table),
)
else:
val = []
else:
val = encode_table[name].astype(dt_new, copy=False)
arrays.append(pa.array(val, type=schema.field(name).type))
# Create a pyarrow table from the list of arrays and the schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
# Write the pyarrow table to a file
writer.write_table(pa_table)
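# Example (illustrative sketch): a round trip through the two functions above;
# the file name is hypothetical.
#     >>> from astropy.table import Table
#     >>> t = Table({'x': [1, 2, 3], 'name': ['a', 'bb', 'ccc']})
#     >>> write_table_parquet(t, 'example.parquet', overwrite=True)
#     >>> t2 = read_table_parquet('example.parquet', include_names=['x'])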
def _get_names(_dict):
"""Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts.
"""
all_names = []
for k, v in _dict.items():
if isinstance(v, dict):
all_names.extend(_get_names(v))
elif k == "name":
all_names.append(v)
return all_names
def register_parquet():
"""
Register Parquet with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader("parquet", Table, read_table_parquet)
io_registry.register_writer("parquet", Table, write_table_parquet)
io_registry.register_identifier("parquet", Table, parquet_identify)
def get_pyarrow():
try:
import pyarrow as pa
from pyarrow import parquet
except ImportError:
raise Exception("pyarrow is required to read and write parquet files")
if minversion(pa, "6.0.0"):
writer_version = "2.4"
else:
writer_version = "2.0"
return pa, parquet, writer_version
|
115c0214acdd6fac707bfd285fcd06f14bbe97dddfb0e351abc01e89277c36df | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits._tiled_compression import compress_hdu, decompress_hdu_section
from astropy.io.fits._tiled_compression.utils import _data_shape, _n_tiles, _tile_shape
from astropy.io.fits.card import Card
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.column import TDEF_RE, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_get_array_mmap,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.utils import lazyproperty
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.shapes import simplify_basic_index
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU
from .image import ImageHDU
from .table import BinTableHDU
# This global variable is used e.g., when calling fits.open with
# disable_image_compression which temporarily changes the global variable to
# False. This should ideally be refactored to avoid relying on global module
# variables.
COMPRESSION_ENABLED = True
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: "NO_DITHER",
SUBTRACTIVE_DITHER_1: "SUBTRACTIVE_DITHER_1",
SUBTRACTIVE_DITHER_2: "SUBTRACTIVE_DITHER_2",
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = (
"NOCOMPRESS",
"RICE_1",
"GZIP_1",
"GZIP_2",
"PLIO_1",
"HCOMPRESS_1",
)
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = "RICE_1"
DEFAULT_QUANTIZE_LEVEL = 16.0
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {"RICE_ONE": "RICE_1"}
COMPRESSION_KEYWORDS = {
"ZIMAGE",
"ZCMPTYPE",
"ZBITPIX",
"ZNAXIS",
"ZMASKCMP",
"ZSIMPLE",
"ZTENSION",
"ZEXTEND",
}
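# Example (illustrative sketch, assuming ``from astropy.io import fits`` and a
# hypothetical data array): these defaults map onto the keyword arguments of
# ``fits.CompImageHDU``.
#     >>> hdu = fits.CompImageHDU(data=np.zeros((100, 100), dtype='f4'),
#     ...                         compression_type=DEFAULT_COMPRESSION_TYPE,
#     ...                         quantize_level=DEFAULT_QUANTIZE_LEVEL)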
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
"SIMPLE": "ZSIMPLE",
"XTENSION": "ZTENSION",
"BITPIX": "ZBITPIX",
"NAXIS": "ZNAXIS",
"EXTEND": "ZEXTEND",
"BLOCKED": "ZBLOCKED",
"PCOUNT": "ZPCOUNT",
"GCOUNT": "ZGCOUNT",
"CHECKSUM": "ZHECKSUM",
"DATASUM": "ZDATASUM",
}
_zdef_re = re.compile(r"(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?")
_compression_keywords = set(_keyword_remaps.values()).union(
["ZIMAGE", "ZCMPTYPE", "ZMASKCMP", "ZQUANTIZ", "ZDITHER0"]
)
_indexed_compression_keywords = {"ZNAXIS", "ZTILE", "ZNAME", "ZVAL"}
    # TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
    # We need to override any Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
            # We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
"The value appended to a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith(
"HIERARCH "
):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.append may have already deleted a blank card in the table
# header, thanks to inheritance: Header.append calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__deltitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False, bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
"The value inserted into a Header must be either a keyword or "
"(keyword, value, [comment]) tuple; got: {!r}".format(card)
)
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super().insert(key, card, useblanks=useblanks, after=after)
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.insert may have already deleted a blank card in the table
# header, thanks to inheritance: Header.insert calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.insert(remapped_index, card, useblanks=False, after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super()._update(card)
if keyword in Card._commentary_keywords:
# Otherwise this will result in a duplicate insertion
return
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super()._relativeinsert(card, before=before, after=after, replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(
card, before=remapped_before, after=remapped_after, replace=replace
)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = (
"Keyword {!r} is reserved for use by the FITS Tiled Image "
"Convention and will not be stored in the header for the "
"image being compressed.".format(keyword)
)
if keyword == "TFIELDS":
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group("label").upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group("label").upper()
num = m.group("num")
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
is_naxisn = False
if keyword[:5] == "NAXIS":
with suppress(ValueError):
index = int(keyword[5:])
is_naxisn = index > 0
if is_naxisn:
return f"ZNAXIS{index}"
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
with suppress(IndexError, KeyError):
idx = self._table_header._cardindex((remapped_insert_keyword, repeat))
return idx
def clear(self):
"""
Remove all cards from the header.
"""
self._table_header.clear()
super().clear()
# TODO: Fix this class so that it doesn't actually inherit from BinTableHDU,
# but instead has an internal BinTableHDU reference
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
_manages_own_heap = True
"""
The calls to CFITSIO lay out the heap data in memory, and we write it out
the same way CFITSIO organizes it. In principle this would break if a user
manually changes the underlying compressed data by hand, but there is no
reason they would want to do that (and if they do that's their
responsibility).
"""
_load_variable_length_data = False
"""
We don't want to always load all the tiles so by setting this option
we can then access the tiles as needed.
"""
_default_name = "COMPRESSED_IMAGE"
@deprecated_renamed_argument(
"tile_size",
None,
since="5.3",
message="The tile_size argument has been deprecated. Use tile_shape "
"instead, but note that this should be given in the reverse "
"order to tile_size (tile_shape should be in Numpy C order).",
)
def __init__(
self,
data=None,
header=None,
name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_shape=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False,
scale_back=False,
tile_size=None,
):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : `~astropy.io.fits.Header`, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``, ``'NOCOMPRESS'``
tile_shape : tuple, optional
Compression tile shape, which should be specified using the default
Numpy convention for array shapes (C order). The default is to
treat each row of image as a tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
            range 1 to 10000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The astropy.io.fits package supports 2 methods of image compression:
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
Astropy first uncompresses the entire file into a temporary file
before performing the requested read operations. The
astropy.io.fits package does not support writing to these types
of compressed files. This type of compression is supported in
the ``_File`` class, not in the `CompImageHDU` class. The file
compression type is recognized by the ``.gz`` or ``.zip`` file
name extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
variable length array column in a FITS binary table. The
               astropy.io.fits package recognizes that this binary table extension
contains an image and treats it as if it were an image
               extension. Under this tile-compression format, FITS header
               keywords remain uncompressed. Individual sections of the image
               can be extracted and decompressed without decompressing the
               whole image by using the
               :attr:`~astropy.io.fits.CompImageHDU.section` property.
The astropy.io.fits package supports 3 general-purpose compression
algorithms plus one other special-purpose compression technique that is
designed for data masks with positive integer pixel values. The 3
general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_shape`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_shape`` value of ``(100,100)`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
        compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
large ``hcomp_scale`` values, however, this can produce undesirable
'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point value pixel values are not exactly preserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real information in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
slightly for each tile. In some cases, it may be desirable to specify
the exact quantization level to be used, instead of specifying it
relative to the calculated noise value. This may be done by specifying
        the negative of the desired quantization level for the value of
``quantize_level``. In the previous example, one could specify
``quantize_level = -2.0`` so that the quantized integer levels differ
by 2.0. Larger negative values for ``quantize_level`` means that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
        regions. The first method, specified with the constant
        ``SUBTRACTIVE_DITHER_1``, adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
        The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
"""
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
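        # The deprecated tile_size argument was given in FITS axis order, while
        # tile_shape uses Numpy (C) order, so reverse it when converting.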
if tile_shape is None and tile_size is not None:
tile_shape = tuple(tile_size[::-1])
elif tile_shape is not None and tile_size is not None:
raise ValueError(
"Cannot specify both tile_size and tile_shape. "
"Note that tile_size is deprecated and tile_shape "
"alone should be used."
)
if data is DELAYED:
# Reading the HDU from a file
super().__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super().__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(
header,
name,
compression_type=compression_type,
tile_shape=tile_shape,
hcomp_scale=hcomp_scale,
hcomp_smooth=hcomp_smooth,
quantize_level=quantize_level,
quantize_method=quantize_method,
dither_seed=dither_seed,
)
        # TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
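        # Cache the uncompressed image dimensions (the ZNAXISn keywords, in
        # FITS axis order); the .shape property reverses these into Numpy order.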
self._axes = [
self._header.get("ZNAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("ZNAXIS", 0))
]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._bitpix = self._header["ZBITPIX"]
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
def _remove_unnecessary_default_extnames(self, header):
"""Remove default EXTNAME values if they are unnecessary.
Some data files (eg from CFHT) can have the default EXTNAME and
an explicit value. This method removes the default if a more
specific header exists. It also removes any duplicate default
values.
"""
if "EXTNAME" in header:
indices = header._keyword_indices["EXTNAME"]
# Only continue if there is more than one found
n_extname = len(indices)
if n_extname > 1:
extnames_to_remove = [
index for index in indices if header[index] == self._default_name
]
if len(extnames_to_remove) == n_extname:
# Keep the first (they are all the same)
extnames_to_remove.pop(0)
# Remove them all in reverse order to keep the index unchanged.
for index in reversed(sorted(extnames_to_remove)):
del header[index]
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
# Similar to base class but uses .header rather than ._header
return str(self.header.get("EXTNAME", self._default_name))
@name.setter
def name(self, value):
# This is a copy of the base class but using .header instead
# of ._header to ensure that the name stays in sync.
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if "EXTNAME" in self.header:
self.header["EXTNAME"] = value
else:
self.header["EXTNAME"] = (value, "extension name")
@classmethod
def match_header(cls, header):
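        # A tile-compressed image is stored as a binary table (XTENSION of
        # BINTABLE, or the legacy A3DTABLE) whose header sets ZIMAGE = T.
        # Returning COMPRESSION_ENABLED lets fits.open disable this match
        # globally via disable_image_compression.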
card = header.cards[0]
if card.keyword != "XTENSION":
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
if xtension not in ("BINTABLE", "A3DTABLE"):
return False
if "ZIMAGE" not in header or not header["ZIMAGE"]:
return False
return COMPRESSION_ENABLED
def _update_header_data(
self,
image_header,
name=None,
compression_type=None,
tile_shape=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None,
):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : `~astropy.io.fits.Header`
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2',
'HCOMPRESS_1', 'NOCOMPRESS'; if this value is `None`, use value
already in the header; if no value already in the header, use
'RICE_1'
tile_shape : tuple of int, optional
compression tile shape (in C order); if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 1
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
            NO_DITHER (-1; default), SUBTRACTIVE_DITHER_1 (1), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
            range 1 to 10000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
"""
# Clean up EXTNAME duplicates
self._remove_unnecessary_default_extnames(self._header)
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
# 4 GB. This is not a perfect heuristic, as one can contrive an input
# array which, when compressed, the entire binary table representing
# the compressed data is larger than 4GB. That said, this is the same
# heuristic used by CFITSIO, so this should give consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
if self._has_data:
huge_hdu = self.data.nbytes > 2**32
else:
huge_hdu = False
# Update the extension name in the table header
if not name and "EXTNAME" not in self._header:
# Do not sync this with the image header since the default
# name is specific to the table header.
self._header.set(
"EXTNAME",
self._default_name,
"name of this binary table extension",
after="TFIELDS",
)
elif name:
# Force the name into table and image headers.
self.name = name
# Set the compression type in the table header.
if compression_type:
if compression_type not in COMPRESSION_TYPES:
warnings.warn(
"Unknown compression type provided (supported are {}). "
"Default ({}) compression will be used.".format(
", ".join(map(repr, COMPRESSION_TYPES)),
DEFAULT_COMPRESSION_TYPE,
),
AstropyUserWarning,
)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set(
"ZCMPTYPE", compression_type, "compression algorithm", after="TFIELDS"
)
else:
compression_type = self.compression_type
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get("BZERO", 0.0)
bscale = image_header.get("BSCALE", 1.0)
after_keyword = "EXTNAME"
if bscale != 1.0:
self._header.set("BSCALE", bscale, after=after_keyword)
after_keyword = "BSCALE"
if bzero != 0.0:
self._header.set("BZERO", bzero, after=after_keyword)
try:
bitpix_comment = image_header.comments["BITPIX"]
except (AttributeError, KeyError):
bitpix_comment = "data type of original image"
try:
naxis_comment = image_header.comments["NAXIS"]
except (AttributeError, KeyError):
naxis_comment = "dimension of original image"
# Set the label for the first column in the table
self._header.set(
"TTYPE1", "COMPRESSED_DATA", "label for field 1", after="TFIELDS"
)
# Set the data format for the first column. It is dependent
# on the requested compression type.
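        # PLIO output consists of 16-bit integers ('I'); the other algorithms
        # produce a byte stream ('B').  'Q' (64-bit) descriptors are used in
        # place of 'P' (32-bit) descriptors for huge HDUs.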
if compression_type == "PLIO_1":
tform1 = "1QI" if huge_hdu else "1PI"
else:
tform1 = "1QB" if huge_hdu else "1PB"
self._header.set(
"TFORM1",
tform1,
"data format of field: variable length array",
after="TTYPE1",
)
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header["TTYPE1"], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header["BITPIX"]
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
# CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA
            # column to store floating point data that couldn't be quantized, instead
# of the UNCOMPRESSED_DATA column. There's no way to control
# this behavior so the only way to determine which behavior will
# be employed is via the CFITSIO version
ttype2 = "GZIP_COMPRESSED_DATA"
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = "1QB" if huge_hdu else "1PB"
# Set up the second column for the table that will hold any
# uncompressable data.
self._header.set("TTYPE2", ttype2, "label for field 2", after="TFORM1")
self._header.set(
"TFORM2",
tform2,
"data format of field: variable length array",
after="TTYPE2",
)
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set("TTYPE3", "ZSCALE", "label for field 3", after="TFORM2")
self._header.set(
"TFORM3", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE3"
)
col3 = Column(name=self._header["TTYPE3"], format=self._header["TFORM3"])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set("TTYPE4", "ZZERO", "label for field 4", after="TFORM3")
self._header.set(
"TFORM4", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE4"
)
after = "TFORM4"
col4 = Column(name=self._header["TTYPE4"], format=self._header["TFORM4"])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = "TFORM1"
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ["TTYPE2", "TFORM2", "TTYPE3", "TFORM3", "TTYPE4", "TFORM4"]
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set("NAXIS1", cols.dtype.itemsize, "width of table in bytes")
self._header.set(
"TFIELDS", ncols, "number of fields in each row", after="GCOUNT"
)
self._header.set(
"ZIMAGE", True, "extension contains compressed image", after=after
)
self._header.set("ZBITPIX", zbitpix, bitpix_comment, after="ZIMAGE")
self._header.set(
"ZNAXIS", self._image_header["NAXIS"], naxis_comment, after="ZBITPIX"
)
        # Strip the table header of all the ZNAXISn and ZTILEn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
try:
del self._header["ZNAXIS" + str(idx)]
del self._header["ZTILE" + str(idx)]
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
naxis = self._image_header["NAXIS"]
if not tile_shape:
tile_shape = []
elif len(tile_shape) != naxis:
warnings.warn(
"Provided tile size not appropriate for the data. "
"Default tile size will be used.",
AstropyUserWarning,
)
tile_shape = []
# Set default tile dimensions for HCOMPRESS_1
if compression_type == "HCOMPRESS_1":
if self._image_header["NAXIS1"] < 4 or self._image_header["NAXIS2"] < 4:
raise ValueError("Hcompress minimum image dimension is 4 pixels")
elif tile_shape:
if tile_shape[-1] < 4 or tile_shape[-2] < 4:
# user specified tile size is too small
raise ValueError("Hcompress minimum tile dimension is 4 pixels")
major_dims = len([ts for ts in tile_shape if ts > 1])
if major_dims > 2:
raise ValueError(
"HCOMPRESS can only support 2-dimensional tile sizes."
"All but two of the tile_shape dimensions must be set "
"to 1."
)
if tile_shape and (tile_shape[-1] == 0 and tile_shape[-2] == 0):
# compress the whole image as a single tile
tile_shape[-1] = self._image_header["NAXIS1"]
tile_shape[-2] = self._image_header["NAXIS2"]
for i in range(2, naxis):
# set all higher tile dimensions = 1
                    tile_shape[-(i + 1)] = 1
elif not tile_shape:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
                # efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
# 1st tile dimension is the row length of the image
tile_shape = [self._image_header["NAXIS1"]]
if self._image_header["NAXIS2"] <= 30:
                    tile_shape.insert(0, self._image_header["NAXIS2"])
else:
# look for another good tile dimension
naxis2 = self._image_header["NAXIS2"]
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_shape.insert(0, dim)
break
else:
tile_shape.insert(0, 17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_shape.insert(0, 1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = self._image_header["NAXIS1"] % tile_shape[-1] # 1st dimen
original_tile_shape = tile_shape[:]
if remain > 0 and remain < 4:
tile_shape[-1] += 1 # try increasing tile size by 1
remain = self._image_header["NAXIS1"] % tile_shape[-1]
if remain > 0 and remain < 4:
raise ValueError(
"Last tile along 1st dimension has less than 4 pixels"
)
remain = self._image_header["NAXIS2"] % tile_shape[-2] # 2nd dimen
if remain > 0 and remain < 4:
tile_shape[-2] += 1 # try increasing tile size by 1
remain = self._image_header["NAXIS2"] % tile_shape[-2]
if remain > 0 and remain < 4:
raise ValueError(
"Last tile along 2nd dimension has less than 4 pixels"
)
if tile_shape != original_tile_shape:
warnings.warn(
f"The tile shape should be such that no tiles have "
f"fewer than 4 pixels. The tile shape has "
f"automatically been changed from {original_tile_shape} "
f"to {tile_shape}, but in future this will raise an "
f"error and the correct tile shape should be specified "
f"directly.",
AstropyDeprecationWarning,
)
# Set up locations for writing the next cards in the header.
last_znaxis = "ZNAXIS"
if self._image_header["NAXIS"] > 0:
after1 = "ZNAXIS1"
else:
after1 = "ZNAXIS"
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
nrows = 0
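        # Each tile occupies one row of the binary table, so the total number
        # of rows is the product over all axes of ceil(axis_length / tile_size).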
for idx, axis in enumerate(self._axes):
naxis = "NAXIS" + str(idx + 1)
znaxis = "ZNAXIS" + str(idx + 1)
ztile = "ZTILE" + str(idx + 1)
if tile_shape and len(tile_shape) >= idx + 1:
ts = tile_shape[len(self._axes) - 1 - idx]
else:
if ztile not in self._header:
# Default tile size
if not idx:
ts = self._image_header["NAXIS1"]
else:
ts = 1
else:
ts = self._header[ztile]
tile_shape.insert(0, ts)
if not nrows:
nrows = (axis - 1) // ts + 1
else:
nrows *= (axis - 1) // ts + 1
if image_header and naxis in image_header:
self._header.set(
znaxis, axis, image_header.comments[naxis], after=last_znaxis
)
else:
self._header.set(
znaxis, axis, "length of original image axis", after=last_znaxis
)
self._header.set(ztile, ts, "size of tiles to be compressed", after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set("NAXIS2", nrows, "number of rows in table")
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
if self._header[zname] == "NOISEBIT":
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == "SCALE ":
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == "SMOOTH ":
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
            hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
zname = "ZNAME" + str(idx)
if zname not in self._header:
break
zval = "ZVAL" + str(idx)
del self._header[zname]
del self._header[zval]
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = "ZCMPTYPE"
idx = 1
if compression_type == "RICE_1":
self._header.set(
"ZNAME1", "BLOCKSIZE", "compression block size", after=after_keyword
)
self._header.set(
"ZVAL1", DEFAULT_BLOCK_SIZE, "pixels per block", after="ZNAME1"
)
self._header.set(
"ZNAME2", "BYTEPIX", "bytes per pixel (1, 2, 4, or 8)", after="ZVAL1"
)
if self._header["ZBITPIX"] == 8:
bytepix = 1
elif self._header["ZBITPIX"] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set(
"ZVAL2", bytepix, "bytes per pixel (1, 2, 4, or 8)", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
elif compression_type == "HCOMPRESS_1":
self._header.set(
"ZNAME1", "SCALE", "HCOMPRESS scale factor", after=after_keyword
)
self._header.set(
"ZVAL1", hcomp_scale, "HCOMPRESS scale factor", after="ZNAME1"
)
self._header.set(
"ZNAME2", "SMOOTH", "HCOMPRESS smooth option", after="ZVAL1"
)
self._header.set(
"ZVAL2", hcomp_smooth, "HCOMPRESS smooth option", after="ZNAME2"
)
after_keyword = "ZVAL2"
idx = 3
if self._image_header["BITPIX"] < 0: # floating point image
self._header.set(
"ZNAME" + str(idx),
"NOISEBIT",
"floating point quantization level",
after=after_keyword,
)
self._header.set(
"ZVAL" + str(idx),
quantize_level,
"floating point quantization level",
after="ZNAME" + str(idx),
)
# Add the dither method and seed
if quantize_method:
if quantize_method not in [
NO_DITHER,
SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2,
]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn(
"Unknown quantization method provided. "
"Default method ({}) used.".format(name)
)
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = "No dithering during quantization"
else:
zquantiz_comment = "Pixel Quantization Algorithm"
self._header.set(
"ZQUANTIZ",
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after="ZVAL" + str(idx),
)
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get("ZQUANTIZ", NO_DITHER)
if isinstance(quantize_method, str):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if "ZDITHER0" in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header["ZDITHER0"]
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif "ZDITHER0" in self._header:
dither_seed = self._header["ZDITHER0"]
else:
dither_seed = self._generate_dither_seed(DEFAULT_DITHER_SEED)
self._header.set(
"ZDITHER0",
dither_seed,
"dithering offset when quantizing floats",
after="ZQUANTIZ",
)
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if "SIMPLE" in image_header:
self._header.set(
"ZSIMPLE",
image_header["SIMPLE"],
image_header.comments["SIMPLE"],
before="ZBITPIX",
)
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if "EXTEND" in image_header:
self._header.set(
"ZEXTEND", image_header["EXTEND"], image_header.comments["EXTEND"]
)
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if "BLOCKED" in image_header:
self._header.set(
"ZBLOCKED",
image_header["BLOCKED"],
image_header.comments["BLOCKED"],
)
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in image_header:
self._header.set(
"ZTENSION",
"IMAGE",
image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in image_header:
self._header.set(
"ZPCOUNT",
image_header["PCOUNT"],
image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in image_header:
self._header.set(
"ZGCOUNT",
image_header["GCOUNT"],
image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# Move CHECKSUM and DATASUM cards from the image header to the
            # table header as ZHECKSUM and ZDATASUM cards.
if "CHECKSUM" in image_header:
self._header.set(
"ZHECKSUM",
image_header["CHECKSUM"],
image_header.comments["CHECKSUM"],
)
if "DATASUM" in image_header:
self._header.set(
"ZDATASUM",
image_header["DATASUM"],
image_header.comments["DATASUM"],
)
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if "XTENSION" in self._image_header:
self._header.set(
"ZTENSION",
"IMAGE",
self._image_header.comments["XTENSION"],
before="ZBITPIX",
)
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if "PCOUNT" in self._image_header:
self._header.set(
"ZPCOUNT",
self._image_header["PCOUNT"],
self._image_header.comments["PCOUNT"],
after=last_znaxis,
)
if "GCOUNT" in self._image_header:
self._header.set(
"ZGCOUNT",
self._image_header["GCOUNT"],
self._image_header.comments["GCOUNT"],
after="ZPCOUNT",
)
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if "ZHECKSUM" in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
def _scale_data(self, data):
if self._orig_bzero != 0 or self._orig_bscale != 1:
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
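            # Remember which pixels hold the BLANK sentinel value so that they
            # can be replaced with NaN once the data have been rescaled below.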
if "BLANK" in self._header:
blanks = data == np.array(self._header["BLANK"], dtype="int32")
else:
blanks = None
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data += self._bzero, and we
# do this instead of self.data = self.data + self._bzero to
# avoid doubling memory usage.
np.add(data, self._orig_bzero, out=data, casting="unsafe")
if blanks is not None:
data = np.where(blanks, np.nan, data)
return data
@lazyproperty
def data(self):
"""
The decompressed data array.
Note that accessing this will cause all the tiles to be loaded,
decompressed, and combined into a single data array. If you do
not need to access the whole array, consider instead using the
:attr:`~astropy.io.fits.CompImageHDU.section` property.
"""
if len(self.compressed_data) == 0:
return None
# Since .section has general code to load any arbitrary part of the
# data, we can just use this - and the @lazyproperty on the current
# property will ensure that we do this only once.
data = self.section[...]
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (
not isinstance(data, np.ndarray) or data.dtype.fields is not None
):
raise TypeError(
"CompImageHDU data has incorrect type:{}; dtype.fields = {}".format(
type(data), data.dtype.fields
)
)
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super().data
if isinstance(compressed_data, np.rec.recarray):
# Make sure not to use 'del self.data' so we don't accidentally
# go through the self.data.fdel and close the mmap underlying
# the compressed_data array
del self.__dict__["data"]
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@compressed_data.deleter
def compressed_data(self):
# Deleting the compressed_data attribute has to be handled
# with a little care to prevent a reference leak
# First delete the ._coldefs attributes under it to break a possible
# reference cycle
if "compressed_data" in self.__dict__:
del self.__dict__["compressed_data"]._coldefs
# Now go ahead and delete from self.__dict__; normally
        # lazyproperty.__delete__ does this for us, but we can preempt it to
# do some additional cleanup
del self.__dict__["compressed_data"]
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
# has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, "_image_header"):
return self._image_header
# Clean up any possible doubled EXTNAME keywords that use
# the default. Do this on the original header to ensure
# duplicates are removed cleanly.
self._remove_unnecessary_default_extnames(self._header)
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
# (Note: Used set here instead of list in case there are any duplicate
# keywords, which there may be in some pathological cases:
# https://github.com/astropy/astropy/issues/2750
for keyword in set(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
hcomments = self._header.comments
if "ZSIMPLE" in self._header:
image_header.set(
"SIMPLE", self._header["ZSIMPLE"], hcomments["ZSIMPLE"], before=0
)
del image_header["XTENSION"]
elif "ZTENSION" in self._header:
if self._header["ZTENSION"] != "IMAGE":
warnings.warn(
"ZTENSION keyword in compressed extension != 'IMAGE'",
AstropyUserWarning,
)
image_header.set("XTENSION", "IMAGE", hcomments["ZTENSION"], before=0)
else:
image_header.set("XTENSION", "IMAGE", before=0)
image_header.set(
"BITPIX", self._header["ZBITPIX"], hcomments["ZBITPIX"], before=1
)
image_header.set("NAXIS", self._header["ZNAXIS"], hcomments["ZNAXIS"], before=2)
last_naxis = "NAXIS"
for idx in range(image_header["NAXIS"]):
znaxis = "ZNAXIS" + str(idx + 1)
naxis = znaxis[1:]
image_header.set(
naxis, self._header[znaxis], hcomments[znaxis], after=last_naxis
)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header["NAXIS"]
for keyword in list(image_header["NAXIS?*"]):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if "ZPCOUNT" in self._header:
image_header.set(
"PCOUNT",
self._header["ZPCOUNT"],
hcomments["ZPCOUNT"],
after=last_naxis,
)
else:
image_header.set("PCOUNT", 0, after=last_naxis)
if "ZGCOUNT" in self._header:
image_header.set(
"GCOUNT", self._header["ZGCOUNT"], hcomments["ZGCOUNT"], after="PCOUNT"
)
else:
image_header.set("GCOUNT", 1, after="PCOUNT")
if "ZEXTEND" in self._header:
image_header.set("EXTEND", self._header["ZEXTEND"], hcomments["ZEXTEND"])
if "ZBLOCKED" in self._header:
image_header.set("BLOCKED", self._header["ZBLOCKED"], hcomments["ZBLOCKED"])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if "ZHECKSUM" in self._header:
image_header.set(
"CHECKSUM", self._header["ZHECKSUM"], hcomments["ZHECKSUM"]
)
if "ZDATASUM" in self._header:
image_header.set("DATASUM", self._header["ZDATASUM"], hcomments["ZDATASUM"])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if "EXTNAME" in image_header and image_header["EXTNAME"] == self._default_name:
del image_header["EXTNAME"]
# Remove the PCOUNT GCOUNT cards if the uncompressed header is
# from a primary HDU
if "SIMPLE" in image_header:
del image_header["PCOUNT"]
del image_header["GCOUNT"]
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ""
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind(".") + 1 :]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header["NAXIS"]):
_shape += (self.header["NAXIS" + str(idx + 1)],)
_format = BITPIX2DTYPE[self.header["BITPIX"]]
return (self.name, self.ver, class_name, len(self.header), _shape, _format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = DTYPE2BITPIX[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"=i{self.data.dtype.itemsize}",
)
try:
nrows = self._header["NAXIS2"]
tbsize = self._header["NAXIS1"] * nrows
self._header["PCOUNT"] = 0
if "THEAP" in self._header:
del self._header["THEAP"]
self._theap = tbsize
# First delete the original compressed data, if it exists
del self.compressed_data
# Compress the data.
# compress_hdu returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compress_hdu(self)
finally:
self.data = old_data
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder(">")
buf = self.compressed_data
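        # The first _theap bytes of the buffer are the fixed-size table
        # records; everything beyond that offset is the variable-length heap
        # produced by compress_hdu.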
compressed_data = buf[: self._theap].view(dtype=dtype, type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
def scale(self, type=None, option="old", bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale ``self.data`` and update the keywords of
``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy dtype
name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If is
`None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
The option will be overwritten by any user-specified bscale/bzero
values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale != 1 or bzero != 0:
_scale = bscale
_zero = bzero
else:
if option == "old":
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax":
                if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2.0**8 - 1)
else:
_zero = (_max + _min) / 2.0
# throw away -2^N
                        _scale = (_max - _min) / (2.0 ** (8 * np.dtype(_type).itemsize) - 2)
# Do the scaling
if _zero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data -= _zero, and we
# do this instead of self.data = self.data - _zero to
# avoid doubling memory usage.
np.subtract(self.data, _zero, out=self.data, casting="unsafe")
self.header["BZERO"] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
with suppress(KeyError):
del header["BZERO"]
if _scale != 1:
self.data /= _scale
self.header["BSCALE"] = _scale
else:
for header in (self.header, self._header):
with suppress(KeyError):
del header["BSCALE"]
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self.header.get("BZERO", 0)
self._bscale = self.header.get("BSCALE", 1)
# Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header["BITPIX"] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(BITPIX2DTYPE[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_pseudo_int_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if "CHECKSUM" in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set(
"CHECKSUM",
image_hdu.header["CHECKSUM"],
image_hdu.header.comments["CHECKSUM"],
)
if "DATASUM" in image_hdu.header:
self._image_header.set(
"DATASUM",
image_hdu.header["DATASUM"],
image_hdu.header.comments["DATASUM"],
)
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
# handles it properly
self.__dict__["data"] = self.compressed_data
return super()._prewriteto(checksum=checksum, inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super()._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, "_imagedata"):
self.__dict__["data"] = self._imagedata
del self._imagedata
else:
del self.data
def _close(self, closed=True):
super()._close(closed=closed)
# Also make sure to close access to the compressed data mmaps
if (
closed
and self._data_loaded
and _get_array_mmap(self.compressed_data) is not None
):
del self.compressed_data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
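        For example, ``BITPIX = 16`` with ``BZERO = 32768`` and ``BSCALE = 1``
        is interpreted as ``uint16`` data when unsigned integer support is
        enabled.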
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _update_header_scale_info(self, dtype=None):
if not self._do_not_scale_image_data and not (
self._orig_bzero == 0 and self._orig_bscale == 1
):
for keyword in ["BSCALE", "BZERO"]:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
with suppress(KeyError):
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header["BITPIX"]
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got {})".format(seed)
)
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
tile_dims = self.tile_shape
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
# The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
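            # For example (illustrative numbers): a first tile whose bytes sum
            # to 123456789 yields a seed of (123456789 % 10000) + 1 == 6790.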
csum = first_tile.view(dtype="uint8").sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return (
(sum(int(x) for x in math.modf(time.time())) + id(self)) % 10000
) + 1
else:
return seed
@property
def section(self):
"""
Efficiently access a section of the image array
This property can be used to access a section of the data without
loading and decompressing the entire array into memory.
The :class:`~astropy.io.fits.CompImageSection` object returned by this
attribute is not meant to be used directly by itself. Rather, slices of
the section return the appropriate slice of the data, and loads *only*
that section into memory. Any valid basic Numpy index can be used to
slice :class:`~astropy.io.fits.CompImageSection`.
Note that accessing data using :attr:`CompImageHDU.section` will always
load tiles one at a time from disk, and therefore when accessing a large
fraction of the data (or slicing it in a way that would cause most tiles
to be loaded) you may obtain better performance by using
:attr:`CompImageHDU.data`.
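        For example (an illustrative sketch; the file name and slice are
        arbitrary, and ``fits`` refers to ``astropy.io.fits``), only the tiles
        overlapping the requested cutout are read and decompressed::
            with fits.open('compressed_image.fits') as hdul:
                cutout = hdul[1].section[100:200, 50:150]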
"""
return CompImageSection(self)
@property
def tile_shape(self):
"""
The tile shape used for the tiled compression.
        This shape is given in Numpy/C order.
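        For example, ``ZTILE1 = 100`` and ``ZTILE2 = 50`` (FITS axis order)
        correspond to a tile shape of ``(50, 100)``.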
"""
return tuple(
[
self._header[f"ZTILE{idx + 1}"]
for idx in range(self._header["ZNAXIS"] - 1, -1, -1)
]
)
@property
def compression_type(self):
"""
The name of the compression algorithm.
"""
return self._header.get("ZCMPTYPE", DEFAULT_COMPRESSION_TYPE)
class CompImageSection:
"""
Class enabling subsets of CompImageHDU data to be loaded lazily via slicing.
    Slices of this object load the corresponding section of the image array
    from the underlying FITS file and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
self._data_shape = _data_shape(self.hdu._header)
self._tile_shape = _tile_shape(self.hdu._header)
self._n_dim = len(self._data_shape)
self._n_tiles = np.array(
_n_tiles(self._data_shape, self._tile_shape), dtype=int
)
@property
def shape(self):
return tuple(self._data_shape)
@property
def ndim(self):
return self.hdu._header["ZNAXIS"]
@property
def dtype(self):
return BITPIX2DTYPE[self.hdu._header["ZBITPIX"]]
def __getitem__(self, index):
# Shortcut if the whole data is requested (this is used by the
# data property, so we optimize it as it is frequently used)
if index is Ellipsis:
first_tile_index = np.zeros(self._n_dim, dtype=int)
last_tile_index = self._n_tiles - 1
data = decompress_hdu_section(self.hdu, first_tile_index, last_tile_index)
return self.hdu._scale_data(data)
index = simplify_basic_index(index, shape=self._data_shape)
# Determine for each dimension the first and last tile to extract
first_tile_index = np.zeros(self._n_dim, dtype=int)
last_tile_index = np.zeros(self._n_dim, dtype=int)
final_array_index = []
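        # Illustrative example of the loop below: with a tile length of 100
        # along a dimension, the slice 250:431 (step 1) maps to
        # first_tile_index == 2 and last_tile_index == 4, and the final
        # in-memory slice becomes 50:231 relative to the block of tiles that
        # is actually decompressed.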
for dim, idx in enumerate(index):
if isinstance(idx, slice):
if idx.step > 0:
first_tile_index[dim] = idx.start // self._tile_shape[dim]
last_tile_index[dim] = (idx.stop - 1) // self._tile_shape[dim]
else:
stop = 0 if idx.stop is None else max(idx.stop - 1, 0)
first_tile_index[dim] = stop // self._tile_shape[dim]
last_tile_index[dim] = idx.start // self._tile_shape[dim]
# Because slices such as slice(5, 0, 1) can exist (which
# would be empty) we need to make sure last_tile_index is
# always larger than first_tile_index
last_tile_index = np.maximum(last_tile_index, first_tile_index)
if idx.step < 0 and idx.stop is None:
final_array_index.append(idx)
else:
final_array_index.append(
slice(
idx.start - self._tile_shape[dim] * first_tile_index[dim],
idx.stop - self._tile_shape[dim] * first_tile_index[dim],
idx.step,
)
)
else:
first_tile_index[dim] = idx // self._tile_shape[dim]
last_tile_index[dim] = first_tile_index[dim]
final_array_index.append(
idx - self._tile_shape[dim] * first_tile_index[dim]
)
data = decompress_hdu_section(self.hdu, first_tile_index, last_tile_index)
return self.hdu._scale_data(data[tuple(final_array_index)])
|
84db7806143d123f8ce570385c98e0db026b9c64a3d8482761ff7c0c9d59e718 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import numpy as np
import pytest
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from astropy.table import Table
from astropy.units import Unit, UnitsWarning, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent
    Consistency is determined by ensuring the difference is less than the
    expected precision. Return True if consistent, False if there are any
    differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
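    # For example, with this precision 1.0 and 1.000005 compare as consistent
    # (relative difference 5e-6), while 1.0 and 1.0001 do not (1e-4).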
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
Compare two record arrays
    Does this field by field, using approximate comparison for float columns
    (complex values are not yet handled).
    Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
                    print(f"fielda[{row}]: {fielda[row]}")
                    print(f"fieldb[{row}]: {fieldb[row]}")
                    print(f"field {i} differs in row {row}")
                    return False
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr["FILENAME"] = "labq01i3q_rawtag.fits"
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert thdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self, home_is_data):
# open some existing FITS files:
tt = fits.open(self.data("tb.fits"))
fd = fits.open(self.data("test0.fits"))
# create some local arrays
a1 = chararray.array(["abc", "def", "xx"])
r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name="abc", format="3A", array=a1)
c2 = fits.Column(name="def", format="E", array=r1)
a3 = np.array([3, 4, 5], dtype="i2")
c3 = fits.Column(name="xyz", format="I", array=a3)
a4 = np.array([1, 2, 3], dtype="i2")
c4 = fits.Column(name="t1", format="I", array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8")
c5 = fits.Column(name="t2", format="C", array=a5)
        # Note that the X format requires a two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name="t3", format="X", array=a6)
a7 = np.array([101, 102, 103], dtype="i4")
c7 = fits.Column(name="t4", format="J", array=a7)
a8 = np.array(
[
[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
)
c8 = fits.Column(name="t5", format="11X", array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view("bool")).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp("tableout1.fits"), overwrite=True)
with fits.open(self.temp("tableout1.fits")) as f2:
exp = [True, True, False, True, False, True, True, True, False, False, True]
temp = f2[1].data.field(7)
assert (temp[0] == exp).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp("tableout2.fits"), "append")
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data("tb.fits"))
assert t[1].header["tform1"] == "1J"
info = {
"name": ["c1", "c2", "c3", "c4"],
"format": ["1J", "3A", "1E", "1L"],
"unit": ["", "", "", ""],
"null": [-2147483647, "", "", ""],
"bscale": ["", "", 3, ""],
"bzero": ["", "", 0.4, ""],
"disp": ["I11", "A3", "G15.7", "L6"],
"start": ["", "", "", ""],
"dim": ["", "", "", ""],
"coord_inc": ["", "", "", ""],
"coord_type": ["", "", "", ""],
"coord_unit": ["", "", "", ""],
"coord_ref_point": ["", "", "", ""],
"coord_ref_value": ["", "", "", ""],
"time_ref_pos": ["", "", "", ""],
}
assert t[1].columns.info(output=False) == info
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field("c4")[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]"
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data("ascii.fits"))
ra1 = np.rec.array(
[
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345),
],
names="c1, c2",
)
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names="c1, c2")
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array(
[(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)],
names="c1, c2",
)
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(["abcd", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", start=19, array=a1)
c2 = fits.Column(name="def", format="E", start=3, array=r1)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert dict(hdu.data.dtype.fields) == {
"abc": (np.dtype("|S3"), 18),
"def": (np.dtype("|S15"), 2),
"t1": (np.dtype("|S10"), 21),
}
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11.0, 12.0])
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp("toto.fits"), overwrite=True)
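        # With bscale=2.3 and bzero=0.6 the physical value 12.0 is stored as
        # (12.0 - 0.6) / 2.3 ~= 4.9565217391304355, which is the string checked
        # below.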
with open(self.temp("toto.fits")) as f:
assert "4.95652173913043548D+00" in f.read()
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93])
c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93])
c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype="uint8")
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
hduL = fits.open(self.temp("testendian.fits"))
rfiHDU = hduL["RFI"]
data = rfiHDU.data
channelsOut = data.field("Channels")[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1.0, 2.0, 3.0, 4.0]
a1 = np.array(a, dtype="<f8")
a2 = np.array(a, dtype=">f8")
col1 = fits.Column(name="a", format="D", array=a1)
col2 = fits.Column(name="b", format="D", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data["a"] == a1).all()
assert (tbhdu.data["b"] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
with fits.open(self.temp("testendian.fits")) as hdul:
assert (hdul[1].data["a"] == a2).all()
assert (hdul[1].data["b"] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "S20", "float32", "S10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "U20", "float32", "U10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
        # Ensure I can change the value of one data element and it affects
        # all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == "Serius"
assert hdu.data[1][1] == "Canopys"
assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == "A1V"
assert hdu.data[1][3] == "F0Ib"
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == "Serius"
assert hdul[1].data[1][1] == "Canopys"
assert (
hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)
).all()
assert hdul[1].data[0][3] == "A1V"
assert hdul[1].data[1][3] == "F0Ib"
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array(
[(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data("tb.fits")) as h:
data = h[1].data
new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith("FITS_rec(")
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp("newtable.fits"))
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
("NGC5", 412, "", z, False),
("NGC6", 434, "", z, True),
("NGC7", 408, "", z, False),
("NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
col = fits.Column(name="a", array=np.array([1, 2]), format="K")
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ["target", "V_mag", "a"]
array = np.rec.array(
[("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
formats="a20,f4,i8",
)
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing columns from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
tbhdu.columns.del_col("flag")
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z),
("NGC2", 334, "", z),
("NGC3", 308, "", z),
("NCG4", 317, "", z),
],
formats="a10,u4,a10,5f4",
)
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col("counts")
tbhdu.columns.del_col("notes")
assert tbhdu.columns.names == ["target", "spectrum"]
array = np.rec.array(
[("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4"
)
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
tbhdu.columns.del_col("V_mag")
assert tbhdu.columns.names == ["target"]
array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20")
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target1", format="10A", array=names)
c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes1", format="A10")
c4 = fits.Column(name="spectrum1", format="5E")
c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp("newtable.fits"))
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
columns_info = "[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]"
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 30, "4R x 10C", columns_info, ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
assert hdu.columns.names == [
"target",
"counts",
"notes",
"spectrum",
"flag",
"target1",
"counts1",
"notes1",
"spectrum1",
"flag1",
]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {"a": 2, "b": "b", "c": 2.3}
data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "S1"), ("c", float)],
)
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
header = hdul[1].header
assert header["TNULL1"] == 2
assert header["TNULL2"] == "b"
assert header["TNULL3"] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
("x", (str, 5)), # 1D column of 5-character strings
("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data["x"] = ["abcde", "xyz"]
data["y"][0] = ["A", "BC", "DEF", "123"]
data["y"][1] = ["X", "YZ", "PQR", "999"]
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp("test.fits"), data)
dx = fits.getdata(self.temp("test.fits"))
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp("test2.fits"))
fx = fits.open(self.temp("test2.fits"))
dx = fx[1].data
fx.close()
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test Table write and read
table.write(self.temp("test3.fits"))
tx = Table.read(self.temp("test3.fits"), character_as_bytes=False)
assert table["x"].dtype == tx["x"].dtype
assert table["y"].dtype == tx["y"].dtype
assert np.all(table["x"] == tx["x"]), f"x: {table['x']} != {tx['x']}"
assert np.all(table["y"] == tx["y"]), f"y: {table['y']} != {tx['y']}"
def test_mask_array(self):
t = fits.open(self.data("table.fits"))
tbdata = t[1].data
mask = tbdata.field("V_mag") > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp("newtable.fits"))
hdul = fits.open(self.temp("newtable.fits"))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
row = t1[1].data[2]
assert row["counts"] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ""
assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all()
row["counts"] = 310
assert row["counts"] == 310
row[1] = 315
assert row["counts"] == 315
assert row[1:4]["counts"] == 315
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
assert row["counts"] == 300
row[1:4][0] = 400
assert row[1:4]["counts"] == 400
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]["counts"] == 500
row[1:4:2][0] = 300
assert row[1:4]["counts"] == 300
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
assert row[1:4].field(0) == 300
assert row[1:4].field("counts") == 300
pytest.raises(KeyError, row[1:4].field, "flag")
row[1:4].setfield("counts", 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, "flag", False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name="target", format="10A")
c2 = fits.Column(name="counts", format="J", unit="DN")
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L")
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = (
"NGC1",
312,
"A Note",
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True,
)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = [
"JIM1",
"33",
"A Note",
np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
True,
]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == "NGC1"
assert tbhdu.columns.columns[2].array[0] == ""
assert (
tbhdu.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == "JIM1"
assert tbhdu.columns.columns[2].array[3] == "A Note"
assert (
tbhdu.columns.columns[3].array[3]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_)
)
and v
)
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.data._coldefs._arrays[0]
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns.columns[0].array
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns._arrays[0]
)
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == "NGC1"
assert tbhdu2.columns.columns[2].array[0] == ""
assert (
tbhdu2.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == "NGC5"
assert tbhdu2.columns.columns[2].array[4] == ""
assert (
tbhdu2.columns.columns[3].array[4]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_)
)
and v
)
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ""
assert tbhdu2.columns.columns[2].array[8] == ""
assert (
tbhdu2.columns.columns[3].array[8]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_)
)
and v
)
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.data._coldefs._arrays[0]
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns.columns[0].array
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns._arrays[0]
)
        # Ensure I can change the value of one data element and it affects
        # all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = hducls(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = hducls(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert "EXTVER" not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header["EXTVER"] == 2
# Passing name to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header["EXTVER"] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header["EXTVER"] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name="spam", format="E", array=[42.0])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (
tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (
tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data("table.fits"))
assert (tbdata.V_mag == tbdata.field("V_mag")).all()
assert (tbdata.V_mag == tbdata["V_mag"]).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data("tb.fits"))
for col in ("c1", "c2", "c3", "c4"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data("ascii.fits"))
for col in ("a", "b"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(
name="x",
format="PI()",
array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data["x"]) == type(hdu.data.x)
assert (hdu.data["x"][0] == hdu.data.x[0]).all()
assert (hdu.data["x"][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data("zerowidth.fits"))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert "ORBPARM" in tbhdu.columns.names
        # The zero-width ORBPARM column is still listed among the data's
        # columns, and the rest of the data should be readable
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.writeto(self.temp("newtable.fits"))
hdul.close()
hdul = fits.open(self.temp("newtable.fits"))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert "ORBPARM" in tbhdu.columns.names
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.close()
def test_string_column_padding(self):
a = ["img1", "img2", "img3a", "p"]
s = (
"img1\x00\x00\x00\x00\x00\x00"
"img2\x00\x00\x00\x00\x00\x00"
"img3a\x00\x00\x00\x00\x00"
"p\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode("raw-unicode-escape") == s
ahdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace(
"\x00", " "
)
assert (hdul[1].data["MEMNAME"] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[
([0, 1, 2, 3, 4, 5], "row1" * 2),
([6, 7, 8, 9, 0, 1], "row2" * 2),
([2, 3, 4, 5, 6, 7], "row3" * 2),
],
formats="6i4,a8",
)
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits"), mode="update") as hdul:
# Modify the TDIM fields to my own specification
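            # (TDIMn axes are in FITS/Fortran order: '(2,3)' gives each
            # 6-element cell a numpy shape of (3, 2), and for the string
            # column the first TDIM axis is the string length.)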
hdul[1].header["TDIM1"] = "(2,3)"
hdul[1].header["TDIM2"] = "(4,2)"
with fits.open(self.temp("newtable.fits")) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (
c1
== np.array(
[
[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]],
]
)
).all()
assert (
c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]])
).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)])
data["x"] = 1, 2, 3
data["s"] = "ok"
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))])
data["x"] = 1, 2, 3
data["s"] = "ok"
del t
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("onedtable.fits"))
with fits.open(self.temp("onedtable.fits")) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header["TDIM1"] == "(1)"
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]]
arr = np.array(
[(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")]
)
tbhdu1 = fits.BinTableHDU(data=arr)
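        # Local helper: check the cell dimensions in memory and after a
        # round trip through a file, for an HDU created by either path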
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(4,2,3)"
assert tbhdu2.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
assert np.all(tbhdu2.data["S"] == tbhdu.data["S"])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
        than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]]
arr2 = [1, 2, 3, 4, 5]
arr = np.array(
[(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")]
)
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp("test.fits"), "wb") as f:
f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)"))
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(2,2,2)"
assert tbhdu2.header["TFORM1"] == "12A"
for row in tbhdu2.data:
assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]])
assert np.all(row["b"] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]]
recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"])
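        # Each cell is a (3, 3) array of 3-character strings, which maps to
        # TDIM1 = '(3,3,3)' with the string length as the first FITS axis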
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
with fits.open(self.temp("test.fits")) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(["a", "b"], dtype="|S1")
arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2")
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name="str", format="1A", array=arra),
fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb),
fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc),
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data["str"].encode("ascii") == arra).all()
assert (h[1].data["strarray"].encode("ascii") == arrb).all()
assert (h[1].data["intarray"] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [
fits.Column(name="a", format="20I", dim="(2,2)", array=arra),
fits.Column(name="b", format="4I", dim="(2,2)", array=arrb),
]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM1"] == "20I"
assert h[1].header["TFORM2"] == "4I"
assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)"
assert (h[1].data["a"] == arra).all()
assert (h[1].data["b"] == arrb).all()
assert h[1].data.itemsize == 48 # 16-bits times 24
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(
VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra
)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data("tdim.fits")) as hdulist:
assert hdulist[1].data["V_mag"].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
targets = data.field("target")
s = data[:]
assert (s.field("target") == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field("target") == targets[:n]).all()
s = data[n:]
assert (s.field("target") == targets[n:]).all()
s = data[::2]
assert (s.field("target") == targets[::2]).all()
s = data[::-1]
assert (s.field("target") == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data("table.fits")) as hdu:
data = hdu[1].data
data["V_mag"] = 0
assert np.all(data["V_mag"] == 0)
data["V_mag"] = 1
assert np.all(data["V_mag"] == 1)
for container in (list, tuple, np.array):
data["V_mag"] = container([1, 2, 3])
assert np.array_equal(data["V_mag"], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data("table.fits"), mode="readonly") as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_colums(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array(
[("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8"
)
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name="c0", format="L", array=a0)
        # Format X is currently not supported here, so this column is left out:
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name="c2", format="B", array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name="c3", format="I", array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name="c4", format="J", array=a4)
a5 = np.array(["a", "abc", "ab"])
c5 = fits.Column(name="c5", format="A3", array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name="c6", format="D", array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128)
c7 = fits.Column(name="c7", format="M", array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name="c8", format="PJ()", array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp("data.txt")
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
presence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name="names", format="I", array=[1])
c2 = fits.Column(name="formats", format="I", array=[2])
c3 = fits.Column(name="other", format="I", array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ["names", "formats", "other"]
assert t.data.formats == ["I"] * 3
assert (t.data["names"] == [1]).all()
assert (t.data["formats"] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1")
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp("table.fits"))
data = fits.getdata(self.temp("table.fits"), ext=1)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data["a"] == arr["a"]).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column("F1", "L", array=[True, False])
c2 = fits.Column("F2", "L", array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp("table.fits"))
with fits.open(self.temp("table.fits"), mode="update") as hdul:
hdul[1].data["F1"][1] = True
hdul[1].data["F2"][0] = True
with fits.open(self.temp("table.fits")) as hdul:
assert (hdul[1].data["F1"] == [True, True]).all()
assert (hdul[1].data["F2"] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column(
"F1",
"A3",
null="---",
array=np.array(["1.0", "2.0", "---", "3.0"]),
ascii=True,
)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp("test.fits"))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp("test.fits"), mode="update") as h:
h[1].header["TFORM1"] = "E3"
del h[1].header["TNULL1"]
with fits.open(self.temp("test.fits")) as h:
pytest.raises(ValueError, lambda: h[1].data["F1"])
try:
with fits.open(self.temp("test.fits")) as h:
h[1].data["F1"]
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data"
)
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = " "
c1 = fits.Column(
"F1",
format="I8",
null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True,
)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp("ascii_null.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null.fits"), mode="r+") as h:
nulled = h.read().replace("2 ", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null.fits"), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = "NaN"
c2 = fits.Column(
"F1",
format="F12.8",
null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True,
)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp("ascii_null2.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null2.fits"), mode="r+") as h:
nulled = h.read().replace("3.00000000", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("tb.fits")) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["NAXIS"] == 2
assert h[1].header["NAXIS1"] == 12
assert h[1].header["NAXIS2"] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unncessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data("table.fits")) as h:
h[1].writeto(self.temp("test.fits"))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert "data" not in h[1].__dict__
with fits.open(self.data("table.fits")) as h1:
with fits.open(self.temp("test.fits")) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data("table.fits"))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data("tb.fits")) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata["c1"] == tbdata2["c1"])
assert np.all(tbdata["c2"] == tbdata2["c2"])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(
tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32)
)
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))])
hdu = fits.BinTableHDU(array)
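        # Row width: 8 (i8) + 64 (S64) + 3*2*4 (i4) = 96 bytes; TDIM is
        # written in FITS order, so the numpy (3, 2) shape appears as '(2,3)'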
assert hdu.header["NAXIS1"] == 96
assert hdu.header["NAXIS2"] == 0
assert hdu.header["TDIM3"] == "(2,3)"
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data("random_groups.fits"))["DATA"]
col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E")
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[1].data["TEST"] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data("tb.fits"))
data2 = fits.getdata(self.data("tb.fits"))
nrows = len(data1) + len(data2)
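        # from_columns with nrows creates a copy long enough for both tables;
        # the second table's rows are then copied into the newly added rows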
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1) :] = data2
mask = merged["c1"] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data("tb.fits")))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([("abc",)], dtype=[("a", "S3")])
fits.writeto(self.temp("test.fits"), data)
with fits.open(self.temp("test.fits"), mode="update") as hdul:
hdul[1].data["a"][0] = "XYZ"
assert hdul[1].data["a"][0] == "XYZ"
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].data["a"][0] == "XYZ"
# Test update but with a non-trivial TDIMn
data = np.array(
[([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)],
dtype=[("a", ("S3", (2, 3)))],
)
fits.writeto(self.temp("test2.fits"), data)
expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]]
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data["a"][0, 1, 1] = "XYZ"
assert np.all(hdul[1].data["a"][0] == expected)
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
assert np.all(hdul[1].data["a"][0] == expected)
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
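                # Access every column so the data is fully read and parsed,
                # exercising the code paths involved in the original leak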
for colname in data.dtype.names:
data[colname]
with _refcounting("FITS_rec"):
readfile(self.data("memtest.fits"))
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
@pytest.mark.slow
def test_reference_leak2(self, tmp_path):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_connect import TestMultipleHDU
from .test_core import TestCore
t1 = TestCore()
t1.setup_method()
try:
with _refcounting("FITS_rec"):
t1.test_add_del_columns2()
finally:
t1.teardown_method()
del t1
t2 = self.__class__()
for test_name in [
"test_recarray_to_bintablehdu",
"test_numpy_ndarray_to_bintablehdu",
"test_new_table_from_recarray",
"test_new_fitsrec",
]:
t2.setup_method()
try:
with _refcounting("FITS_rec"):
getattr(t2, test_name)()
finally:
t2.teardown_method()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting("FITS_rec"):
t3.test_read(tmp_path)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data("table.fits")) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
msg = (
r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\."
)
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
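        # With format 'J' (signed 32-bit) and TZERO1 = 2**31, the column is
        # interpreted as unsigned 32-bit per the FITS convention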
col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
# Test that the file wrote out correctly
with fits.open(self.temp("test.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == data)
# Test updating the unsigned int data
hdu.data["A"][0] = 99
hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(
name="c1",
array=np.array([1], dtype=">i2"),
format="1I",
bscale=1,
bzero=32768,
)
S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data["c1"][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data["c1"] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data["c1"][0] = 10
assert X[1].data["c1"][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data["c1"][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4")
i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8")
i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8")
i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2")
t0 = Table([i08, i08 * 2, i10, i20, i02])
t1 = Table.read(self.data("ascii_i4-i20.fits"))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
def test_ascii_floattypes(self):
"""Test different float formats."""
col1 = fits.Column(
name="a", format="D", array=np.array([11.1, 12.2]), ascii=True
)
col2 = fits.Column(
name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True
)
col3 = fits.Column(
name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True
)
hdu = fits.TableHDU.from_columns([col1, col2, col3])
hdu.writeto(self.temp("foo.fits"))
with fits.open(self.temp("foo.fits"), memmap=False) as hdul:
assert comparerecords(hdul[1].data, hdu.data)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert (
len(objgraph.by_type(type_)) <= refcount
), "More {0!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as toto:
q = toto[1].data.field("QUAL_SPE")
assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith("J(1571)")
for code in ("PJ()", "QJ()"):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name="TESTVLF", format=format_code, array=arr)
col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data["TESTSCA"]) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data["TESTVLF"]) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all()
assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all()
assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all()
assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all()
for code in ("PJ()", "QJ()"):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array(
[np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array(
[np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ["a", "ab", "abc"]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
data = fits.getdata(self.temp("toto.fits"))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data["QUAL_SPE"], col.array):
assert (row_a == row_b).all()
for code in ("PJ()", "QJ()"):
test(code)
@pytest.mark.skipif(
not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == "win32",
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column("test", format="J", array=np.arange(255))
c1 = fits.Column("A", format="PJ", array=arr1)
c2 = fits.Column("B", format="PJ", array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp("test.fits"), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM2"] == "PJ(255)"
assert h[2].header["TFORM2"] == "PJ(255)"
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp("test.fits")) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp("test2.fits"))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp("test2.fits"), mode="append") as new_hdul:
for _ in range(2):
with fits.open(self.temp("test.fits")) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp("test2.fits")) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data("theap-gap.fits"))
data = hdul[1].data
assert data.shape == (500,)
assert data["i"][497] == 497
assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name="var",
format="PI()",
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data("variable_length_table.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]]
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_P_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10812
        Check that the error is raised when the heap size is bigger than what
        can be indexed with a 32-bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
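        # 2**28 + 1 float64 values is just over 2 GiB of heap data, more than
        # the signed 32-bit offsets used by the 'P' descriptors can address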
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
with pytest.raises(
ValueError, match="Please consider using the 'Q' format for your file."
):
t.writeto(self.temp("matrix.fits"))
def test_empty_vla_raw_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/12881
        Check that empty VLAs are correctly read.
"""
columns = [
fits.Column(name="integer", format="B", array=(1, 2)),
fits.Column(name="empty", format="PJ", array=([], [])),
]
fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits"))
with fits.open(self.temp("bug.fits")) as hdu:
# We can't compare the whole array since the _VLF is an array of
# objects, hence we compare elementwise
for i in range(len(hdu[1].data["empty"])):
assert np.array_equal(
hdu[1].data["empty"][i], np.array([], dtype=np.int32)
)
def test_multidim_VLA_tables(self):
"""
        Check that multidimensional VLFs are correctly written and read.
See https://github.com/astropy/astropy/issues/12860
and https://github.com/astropy/astropy/issues/7810
"""
a = np.arange(5)
b = np.arange(7)
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test.fits"))
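        # dim='(7,1)' gives each cell a trailing (1, n) shape, so the values
        # read back with a leading length-1 axis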
with fits.open(self.temp("test.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(7)"]
assert np.array_equal(
hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0, 3.0, 4.0]])
)
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
)
a = np.arange(10).reshape((5, 2))
b = np.arange(14).reshape((7, 2))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(14)", dim="(2,7)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(14)"]
assert np.array_equal(
hdus[1].data["test"][0],
np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]),
)
assert np.array_equal(
hdus[1].data["test"][1],
np.array(
[
[0.0, 1.0],
[2.0, 3.0],
[4.0, 5.0],
[6.0, 7.0],
[8.0, 9.0],
[10.0, 11.0],
[12.0, 13.0],
]
),
)
a = np.arange(3).reshape((1, 3))
b = np.arange(6).reshape((2, 3))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(6)", dim="(3,2)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(6)"]
assert np.array_equal(hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0]]))
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
)
def test_heterogeneous_VLA_tables(self):
"""
Check the behaviour of heterogeneous VLA objects.
"""
# The column format fixes the type of the arrays in the VLF object.
a = np.array([45, 30])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
c1 = fits.Column(name="var", format="PJ()", array=var)
hdu = fits.BinTableHDU.from_columns([c1])
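# The 'PJ' format declares 32-bit integers, so both arrays are coerced to int32.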
assert hdu.data[0].array.dtype[0].subdtype[0] == "int32"
# Strings in the VLF object can't be added to the table
a = np.array([45, "thirty"])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
c1 = fits.Column(name="var", format="PJ()", array=var)
with pytest.raises(
ValueError, match=r"invalid literal for int\(\) with base 10"
):
fits.BinTableHDU.from_columns([c1])
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
long as they are unambiguous ("unambiguous" is a loose requirement here,
since Numpy is case insensitive when parsing the format codes, but their
"proper" case is lower-case, so we accept that). In practice, any key in
the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column("TEST", np.dtype(recformat))
assert c.format == fitsformat
c = fits.Column("TEST", recformat)
assert c.format == fitsformat
c = fits.Column("TEST", fitsformat)
assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b" 1. 2. 3."
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
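# Unsigned integer columns are stored as signed values plus a TZERO offset
# (the FITS pseudo-unsigned convention); ColDefs exposes that offset as bzero.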
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(
UserWarning,
match=r"Field 2 has a repeat count of 0 in its format code",
):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test that the VerifyError message generated is the one with the highest
priority, i.e. that the ordering of error messages is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
from astropy.table import Table
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__, so compare element by element.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr["TUNIT1"] = "pixel"
hdr["TUNIT2"] = "m"
hdr["TUNIT3"] = "m"
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr["TCTYP2"] = "RA---TAN"
hdr["TCTYP3"] = "ANGLE"
hdr["TCRVL2"] = -999.0
hdr["TCRVL3"] = -999.0
hdr["TCRPX2"] = 1.0
hdr["TCRPX3"] = 1.0
hdr["TALEN2"] = 16384
hdr["TALEN3"] = 1024
hdr["TCUNI2"] = "angstrom"
hdr["TCUNI3"] = "deg"
# Other non-relevant keywords
hdr["RA"] = 1.5
hdr["DEC"] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special"
)
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == "s"
assert hdu.columns[1].unit == "pixel"
assert hdu.columns[2].unit is None
assert hdu.header["TUNIT1"] == "s"
assert hdu.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert "TCTYP1" not in hdu.header
assert hdu.header["TCTYP2"] == "RA---TAN"
assert hdu.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu.header["RA"] == 1.5
assert hdu.header["DEC"] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmp_path / "test.fits"
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == "s"
assert hdu2.columns[1].unit == "pixel"
assert hdu2.columns[2].unit is None
assert hdu2.header["TUNIT1"] == "s"
assert hdu2.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == "RA---TAN"
assert hdu2.columns[2].coord_type == "ANGLE"
assert "TCTYP1" not in hdu2.header
assert hdu2.header["TCTYP2"] == "RA---TAN"
assert hdu2.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu2.header["RA"] == 1.5
assert hdu2.header["DEC"] == 3.0
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
|
215053a4e3d1c234ad0509c5e896d01ef25e242261c77ad3d1f2c3dcf46103e1 | import gc
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.io import fits
from astropy.io.fits import (
BinTableHDU,
HDUList,
ImageHDU,
PrimaryHDU,
connect,
table_to_hdu,
)
from astropy.io.fits.column import (
_fortran_to_python_format,
_parse_tdisp_format,
python_to_tdisp,
)
from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names
from astropy.table import Column, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.time import Time
from astropy.units import allclose as quantity_allclose
from astropy.units.format.fits import UnitScaleError
from astropy.units.quantity import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
# FITS does not preserve precision, in_subfmt, and out_subfmt.
time_attrs = ["value", "shape", "format", "scale", "location"]
compare_attrs = {
name: (time_attrs if isinstance(col, Time) else compare_attrs[name])
for name, col in mixin_cols.items()
}
# FITS does not support multi-element location, array with object dtype,
# or logarithmic quantities.
unsupported_cols = {
name: col
for name, col in mixin_cols.items()
if (
isinstance(col, Time)
and col.location.shape != ()
or isinstance(col, np.ndarray)
and col.dtype.kind == "O"
or isinstance(col, u.LogQuantity)
)
}
mixin_cols = {
name: col for name, col in mixin_cols.items() if name not in unsupported_cols
}
def equal_data(a, b):
return all(np.all(a[name] == b[name]) for name in a.dtype.names)
class TestSingleTable:
def setup_class(self):
self.data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "U1"), ("c", float)],
)
def test_simple(self, tmp_path):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmp_path):
filename = tmp_path / "test_simple.fit"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmp_path):
filename = tmp_path / "test_simple.fits"
t1 = Table(self.data)
t1.meta["A"] = 1
t1.meta["B"] = 2.3
t1.meta["C"] = "spam"
t1.meta["comments"] = ["this", "is", "a", "long", "comment"]
t1.meta["HISTORY"] = ["first", "second", "third"]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
if isinstance(t1.meta[key], list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmp_path):
filename = tmp_path / "test_simple.fits"
t1 = Table(self.data)
t1.meta["ttype1"] = "spam"
with pytest.warns(
AstropyUserWarning,
match=(
"Meta-data keyword ttype1 "
"will be ignored since it conflicts with a FITS "
"reserved keyword"
),
) as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
def test_simple_noextension(self, tmp_path):
"""
Test that file type is recognized without extension
"""
filename = tmp_path / "test_simple"
t1 = Table(self.data)
t1.write(filename, overwrite=True, format="fits")
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_with_units(self, table_type, tmp_path):
filename = tmp_path / "test_with_units.fits"
t1 = table_type(self.data)
t1["a"].unit = u.m
t1["c"].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2["a"].unit == u.m
assert t2["c"].unit == u.km / u.s
def test_with_custom_units_qtable(self, tmp_path):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = tmp_path / "test_with_units.fits"
unit = u.def_unit("bandpass_sol_lum")
t = QTable()
t["l"] = np.ones(5) * unit
with pytest.warns(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert "bandpass_sol_lum" in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with pytest.warns(
u.UnitsWarning, match="'bandpass_sol_lum' did not parse"
) as w:
t2 = QTable.read(filename)
assert len(w) == 1
assert isinstance(t2["l"].unit, u.UnrecognizedUnit)
assert str(t2["l"].unit) == "bandpass_sol_lum"
assert np.all(t2["l"].value == t["l"].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3["l"].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with pytest.warns(AstropyUserWarning):
t3.write(filename, overwrite=True)
# It should also be possible to read the file in using a unit alias,
# even to a unit that may not be the same.
with u.set_enabled_aliases({"bandpass_sol_lum": u.Lsun}):
t3 = QTable.read(filename)
assert t3["l"].unit is u.Lsun
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_read_with_unit_aliases(self, table_type):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = "Angstroms"
hdu.columns[2].unit = "ergs/(cm.s.Angstroms)"
with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)):
t = table_type.read(hdu)
assert t["a"].unit == u.AA
assert t["c"].unit == u.erg / (u.cm * u.s * u.AA)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_with_format(self, table_type, tmp_path):
filename = tmp_path / "test_with_format.fits"
t1 = table_type(self.data)
t1["a"].format = "{:5d}"
t1["b"].format = "{:>20}"
t1["c"].format = "{:6.2f}"
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2["a"].format == "{:5d}"
assert t2["b"].format == "{:>20}"
assert t2["c"].format == "{:6.2f}"
def test_masked(self, tmp_path):
filename = tmp_path / "test_masked.fits"
t1 = Table(self.data, masked=True)
t1.mask["a"] = [1, 0, 1, 0]
t1.mask["b"] = [1, 0, 0, 1]
t1.mask["c"] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
@pytest.mark.parametrize("masked", [True, False])
def test_masked_nan(self, masked, tmp_path):
"""Check that masked values by default are replaced by NaN.
This should work for any shape and be independent of whether the
Table is formally masked or not.
"""
filename = tmp_path / "test_masked_nan.fits"
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype="f4")
c = np.ma.stack([a, b], axis=-1)
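# c is a (4, 2) masked column combining a and b along the last axis, so NaN
# substitution is also exercised for a multidimensional column.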
t1 = Table([a, b, c], names=["a", "b", "c"], masked=masked)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2["a"].data, [np.nan, 8.5, np.nan, 6.25])
assert_array_equal(t2["b"].data, [np.nan, 4.5, 6.75, np.nan])
assert_array_equal(
t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1)
)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
def test_masked_serialize_data_mask(self, tmp_path):
filename = tmp_path / "test_masked_nan.fits"
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=["a", "b", "c"])
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2["a"].data, [5.25, 8.5, 3.75, 6.25])
assert_array_equal(t2["b"].data, [2.5, 4.5, 6.75, 8.875])
assert_array_equal(
t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1)
)
assert np.all(t1["a"].mask == t2["a"].mask)
assert np.all(t1["b"].mask == t2["b"].mask)
assert np.all(t1["c"].mask == t2["c"].mask)
def test_read_from_fileobj(self, tmp_path):
filename = tmp_path / "test_read_from_fileobj.fits"
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, "rb") as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = "RADIANS"
hdu.columns[1].unit = "spam"
hdu.columns[2].unit = "millieggs"
with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"):
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize("table_type", (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmp_path):
# While we are generous on input (see above), we are strict on
# output, dropping units not recognized by the fits standard.
filename = tmp_path / "test_nonstandard_units.fits"
spam = u.def_unit("spam")
t = table_type()
t["a"] = [1.0, 2.0, 3.0] * spam
with pytest.warns(AstropyUserWarning, match="spam") as w:
t.write(filename)
assert len(w) == 1
if table_type is Table:
assert "cannot be recovered in reading. " in str(w[0].message)
else:
assert "lost to non-astropy fits readers" in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert "TUNIT1" not in hdu.header
def test_memmap(self, tmp_path):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with open files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize("memmap", (False, True))
def test_character_as_bytes(self, tmp_path, memmap):
filename = tmp_path / "test_simple.fts"
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2["b"].dtype.kind == "U"
assert t3["b"].dtype.kind == "S"
assert equal_data(t2, t3)
# To avoid issues with open files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
def test_oned_single_element(self, tmp_path):
filename = tmp_path / "test_oned_single_element.fits"
table = Table({"x": [[1], [2]]})
table.write(filename, overwrite=True)
read = Table.read(filename)
assert read["x"].shape == (2, 1)
assert len(read["x"][0]) == 1
def test_write_append(self, tmp_path):
t = Table(self.data)
hdu = table_to_hdu(t)
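# Helper: the file should contain `expected` HDUs, and every table HDU from
# `start_from` onward should match the HDU built from t.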
def check_equal(filename, expected, start_from=1):
with fits.open(filename) as hdu_list:
assert len(hdu_list) == expected
for hdu_table in hdu_list[start_from:]:
assert hdu_table.header == hdu.header
assert np.all(hdu_table.data == hdu.data)
filename = tmp_path / "test_write_append.fits"
t.write(filename, append=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Check the overwrite works correctly.
t.write(filename, append=True, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Normal write, check it's not appending.
t.write(filename, overwrite=True)
t.write(filename, overwrite=True)
check_equal(filename, 2)
# Now write followed by append, with different shaped tables.
t2 = Table(np.array([1, 2]))
t2.write(filename, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3, start_from=2)
assert equal_data(t2, Table.read(filename, hdu=1))
def test_write_overwrite(self, tmp_path):
t = Table(self.data)
filename = tmp_path / "test_write_overwrite.fits"
t.write(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename)
t.write(filename, overwrite=True)
def test_mask_nans_on_read(self, tmp_path):
filename = tmp_path / "test_inexact_format_parse_on_read.fits"
c1 = fits.Column(name="a", array=np.array([1, 2, np.nan]), format="E")
table_hdu = fits.TableHDU.from_columns([c1])
table_hdu.writeto(filename)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
# using memmap also deactivates the masking
tab = Table.read(filename, memmap=True)
assert tab.mask is None
def test_mask_null_on_read(self, tmp_path):
filename = tmp_path / "test_null_format_parse_on_read.fits"
col = fits.Column(
name="a",
array=np.array([1, 2, 99, 60000], dtype="u2"),
format="I",
null=99,
bzero=32768,
)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
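# null=99 becomes TNULL1 in the header, so the raw value 99 should be read
# back as a masked element.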
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
def test_mask_str_on_read(self, tmp_path):
filename = tmp_path / "test_null_format_parse_on_read.fits"
col = fits.Column(
name="a", array=np.array([b"foo", b"bar", b""], dtype="|S3"), format="A3"
)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
def test_heterogeneous_VLA_tables(self, tmp_path):
"""
Check the behaviour of heterogeneous VLA objects.
"""
filename = tmp_path / "test_table_object.fits"
msg = "Column 'col1' contains unsupported object types or mixed types: "
# The column format fixes the type of the arrays in the VLF object.
a = np.array([45, 30])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
tab = Table({"col1": var})
with pytest.raises(TypeError, match=msg):
tab.write(filename)
# Strings in the VLF object can't be added to the table
a = np.array(["five", "thirty"])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
tab = Table({"col1": var})
with pytest.raises(TypeError, match=msg):
tab.write(filename)
def test_write_object_tables_with_unified(self, tmp_path):
"""
Write objects with the unified I/O interface.
See https://github.com/astropy/astropy/issues/1906
"""
filename = tmp_path / "test_table_object.fits"
msg = r"Column 'col1' contains unsupported object types or mixed types: {dtype\('O'\)}"
# Make a FITS table with an object column
tab = Table({"col1": [None]})
with pytest.raises(TypeError, match=msg):
tab.write(filename)
def test_write_VLA_tables_with_unified(self, tmp_path):
"""
Write VLA objects with the unified I/O interface.
See https://github.com/astropy/astropy/issues/11323
"""
filename = tmp_path / "test_table_VLA.fits"
# Make a FITS table with a variable-length array column
a = np.array([45, 30])
b = np.array([11, 12, 13])
c = np.array([45, 55, 65, 75])
var = np.array([a, b, c], dtype=object)
tabw = Table({"col1": var})
tabw.write(filename)
tab = Table.read(filename)
assert np.array_equal(tab[0]["col1"], np.array([45, 30]))
assert np.array_equal(tab[1]["col1"], np.array([11, 12, 13]))
assert np.array_equal(tab[2]["col1"], np.array([45, 55, 65, 75]))
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "U1"), ("c", float)],
)
self.data2 = np.array(
list(zip([1.4, 2.3, 3.2, 4.7], [2.3, 4.5, 6.7, 8.9])),
dtype=[("p", float), ("q", float)],
)
self.data3 = np.array(
list(zip([1, 2, 3, 4], [2.3, 4.5, 6.7, 8.9])),
dtype=[("A", int), ("B", float)],
)
hdu0 = PrimaryHDU()
hdu1 = BinTableHDU(self.data1, name="first")
hdu2 = BinTableHDU(self.data2, name="second")
hdu3 = ImageHDU(np.ones((3, 3)), name="third")
hdu4 = BinTableHDU(self.data3)
self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])
self.hdus3 = HDUList([hdu0, hdu3, hdu2])
self.hdus2 = HDUList([hdu0, hdu1, hdu3])
self.hdus1 = HDUList([hdu0, hdu1])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings("always")
def test_read(self, tmp_path):
filename = tmp_path / "test_read.fits"
self.hdus.writeto(filename)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)",
):
t = Table.read(filename)
assert equal_data(t, self.data1)
filename = tmp_path / "test_read_2.fits"
self.hdusb.writeto(filename)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)",
):
t3 = Table.read(filename)
assert equal_data(t3, self.data2)
def test_read_with_hdu_0(self, tmp_path):
filename = tmp_path / "test_read_with_hdu_0.fits"
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == "No table found in hdu=0"
@pytest.mark.parametrize("hdu", [1, "first"])
def test_read_with_hdu_1(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_1.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [2, "second"])
def test_read_with_hdu_2(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_2.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize("hdu", [3, "third"])
def test_read_with_hdu_3(self, tmp_path, hdu):
filename = tmp_path / "test_read_with_hdu_3.fits"
self.hdus.writeto(filename)
with pytest.raises(ValueError, match="No table found in hdu=3"):
Table.read(filename, hdu=hdu)
def test_read_with_hdu_4(self, tmp_path):
filename = tmp_path / "test_read_with_hdu_4.fits"
self.hdus.writeto(filename)
t = Table.read(filename, hdu=4)
assert equal_data(t, self.data3)
@pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""])
def test_read_with_hdu_missing(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_1.fits"
self.hdus1.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)",
):
t1 = Table.read(filename, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize("hdu", [0, 2, "third"])
def test_read_with_hdu_warning(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_2.fits"
self.hdus2.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)",
):
t2 = Table.read(filename, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize("hdu", [0, 1, "third"])
def test_read_in_last_hdu(self, tmp_path, hdu):
filename = tmp_path / "test_warn_with_hdu_3.fits"
self.hdus3.writeto(filename)
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)",
):
t3 = Table.read(filename, hdu=hdu)
assert equal_data(t3, self.data2)
def test_read_from_hdulist(self):
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)",
):
t = Table.read(self.hdus)
assert equal_data(t, self.data1)
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)",
):
t3 = Table.read(self.hdusb)
assert equal_data(t3, self.data2)
def test_read_from_hdulist_with_hdu_0(self):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == "No table found in hdu=0"
@pytest.mark.parametrize("hdu", [1, "first", None])
def test_read_from_hdulist_with_single_table(self, hdu):
t = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [1, "first"])
def test_read_from_hdulist_with_hdu_1(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize("hdu", [2, "second"])
def test_read_from_hdulist_with_hdu_2(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize("hdu", [3, "third"])
def test_read_from_hdulist_with_hdu_3(self, hdu):
with pytest.raises(ValueError, match="No table found in hdu=3"):
Table.read(self.hdus, hdu=hdu)
@pytest.mark.parametrize("hdu", [0, 2, "third"])
def test_read_from_hdulist_with_hdu_warning(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)",
):
t2 = Table.read(self.hdus2, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""])
def test_read_from_hdulist_with_hdu_missing(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)",
):
t1 = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize("hdu", [0, 1, "third"])
def test_read_from_hdulist_in_last_hdu(self, hdu):
with pytest.warns(
AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)",
):
t3 = Table.read(self.hdus3, hdu=hdu)
assert equal_data(t3, self.data2)
@pytest.mark.parametrize("hdu", [None, 1, "first"])
def test_read_from_single_hdu(self, hdu):
t = Table.read(self.hdus[1])
assert equal_data(t, self.data1)
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename("data/tb.fits"))
assert np.all(t["c1"].mask == np.array([False, False]))
assert not hasattr(t["c2"], "mask")
assert not hasattr(t["c3"], "mask")
assert not hasattr(t["c4"], "mask")
assert np.all(t["c1"].data == np.array([1, 2]))
assert np.all(t["c2"].data == np.array([b"abc", b"xy "]))
assert_allclose(t["c3"].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t["c4"].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ["x", "y", "z"]
t = Table([a, b, c], names=("a", "b", "c"), meta={"name": "first table"})
t["a"].unit = "1.2"
with pytest.raises(
UnitScaleError,
match=r"The column 'a' could not be "
r"stored in FITS format because it has a scale '\(1\.2\)'"
r" that is not recognized by the FITS standard\. Either "
r"scale the data or change the units\.",
):
t.write("t.fits", format="fits", overwrite=True)
@pytest.mark.parametrize(
"tdisp_str, format_return",
[
("EN10.5", ("EN", "10", "5", None)),
("F6.2", ("F", "6", "2", None)),
("B5.10", ("B", "5", "10", None)),
("E10.5E3", ("E", "10", "5", "3")),
("A21", ("A", "21", None, None)),
],
)
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize(
"tdisp_str, format_str_return",
[
("G15.4E2", "{:15.4g}"),
("Z5.10", "{:5x}"),
("I6.5", "{:6d}"),
("L8", "{:>8}"),
("E20.7", "{:20.7e}"),
],
)
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize(
"fmt_str, tdisp_str",
[
("{:3d}", "I3"),
("3d", "I3"),
("7.3f", "F7.3"),
("{:>4}", "A4"),
("{:7.4f}", "F7.4"),
("%5.3g", "G5.3"),
("%10s", "A10"),
("%.4f", "F13.4"),
],
)
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
def test_logical_python_to_tdisp():
assert python_to_tdisp("{:>7}", logical_dtype=True) == "L7"
def test_bool_column(tmp_path):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
arr[::2] = np.False_
t = Table([arr])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert hdul[1].data["col0"].dtype == np.dtype("bool")
assert np.all(hdul[1].data["col0"] == arr)
def test_unicode_column(tmp_path):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(["a", "b", "cd"])])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert np.all(hdul[1].data["col0"] == ["a", "b", "cd"])
assert hdul[1].header["TFORM1"] == "2A"
t2 = Table([np.array(["\N{SNOWMAN}"])])
with pytest.raises(UnicodeEncodeError):
t2.write(tmp_path / "test.fits", overwrite=True)
def test_unit_warnings_read_write(tmp_path):
filename = tmp_path / "test_unit.fits"
t1 = Table([[1, 2], [3, 4]], names=["a", "b"])
t1["a"].unit = "m/s"
t1["b"].unit = "not-a-unit"
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
with pytest.warns(
u.UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
Table.read(filename, hdu=1)
def test_convert_comment_convention():
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename("data/stddata.fits")
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables are present",
):
t = Table.read(filename)
assert t.meta["comments"] == [
"",
" *** End of mandatory fields ***",
"",
"",
" *** Column names ***",
"",
"",
" *** Column formats ***",
"",
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
"info.dtype",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of an empty OrderedDict(); #6720
# would fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
elif isinstance(a1, np.dtype):
# FITS does not perfectly preserve dtype: byte order can change, and
# unicode gets stored as bytes. So, we just check safe casting, to
# ensure we do not, e.g., accidentally change integer to float, etc.
if NUMPY_LT_1_22 and a1.names:
# For old numpy, can_cast does not deal well with structured dtype.
assert a1.names == a2.names
else:
assert np.can_cast(a2, a1, casting="safe")
else:
assert np.all(a1 == a2)
def test_fits_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="fits")
t2 = Table.read(filename, format="fits", astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time) else serialized_names[name]
)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["HISTORY"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.fits"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my \n\n\n description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize("name_col", unsupported_cols.items())
@pytest.mark.xfail(reason="column type unsupported")
def test_fits_unsupported_mixin(name_col, tmp_path):
# Check that we actually fail in writing unsupported columns defined
# on top.
filename = tmp_path / "test_simple.fits"
name, col = name_col
Table([col], names=[name]).write(filename, format="fits")
def test_info_attributes_with_no_mixins(tmp_path):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = tmp_path / "test.fits"
t = Table([[1.0, 2.0]])
t["col0"].description = "hello" * 40
t["col0"].format = "{:8.4f}"
t["col0"].meta["a"] = {"b": "c"}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2["col0"].description == "hello" * 40
assert t2["col0"].format == "{:8.4f}"
assert t2["col0"].meta["a"] == {"b": "c"}
@pytest.mark.parametrize("method", ["set_cols", "names", "class"])
def test_round_trip_masked_table_serialize_mask(tmp_path, method):
"""
Same as the previous test, but set serialize_method to 'data_mask' so the
mask is written out and the behaviour is fully correct.
"""
filename = tmp_path / "test.fits"
t = simple_table(masked=True) # int, float, and str cols with one masked element
# MaskedColumn with no masked elements. See the MaskedColumnInfo class
# _represent_as_dict() method for why we test a column with no masked elements.
t["d"] = [1, 2, 3]
if method == "set_cols":
for col in t.itercols():
col.info.serialize_method["fits"] = "data_mask"
t.write(filename)
elif method == "names":
t.write(
filename,
serialize_method={
"a": "data_mask",
"b": "data_mask",
"c": "data_mask",
"d": "data_mask",
},
)
elif method == "class":
t.write(filename, serialize_method="data_mask")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
def test_meta_not_modified(tmp_path):
filename = tmp_path / "test.fits"
t = Table(data=[Column([1, 2], "a", description="spam")])
t.meta["comments"] = ["a", "b"]
assert len(t.meta) == 1
t.write(filename)
assert len(t.meta) == 1
assert t.meta["comments"] == ["a", "b"]
def test_is_fits_gh_14305():
"""Regression test for https://github.com/astropy/astropy/issues/14305"""
assert not connect.is_fits("", "foo.bar", None)
|
27770300868866c8c1102e2e46628da274671a95f1d57f14e57cda23ddfcf481 | import numpy as np
import pytest
from astropy.io import ascii
from astropy.io.ascii.qdp import _get_lines_from_file, _read_table_qdp, _write_table_qdp
from astropy.table import Column, MaskedColumn, Table
from astropy.utils.exceptions import AstropyUserWarning
def test_get_tables_from_qdp_file(tmp_path):
example_qdp = """
! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
! Columns are as labelled
READ TERR 1
READ SERR 2
! WT -- hard data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.212439 0.212439
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
NO NO NO NO NO
! WT -- soft data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
NO NO NO NO NO
! WT -- hardness ratio
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 -nan
"""
path = tmp_path / "test.qdp"
with open(path, "w") as fp:
print(example_qdp, file=fp)
table0 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=0)
assert table0.meta["initial_comments"][0].startswith("Swift")
assert table0.meta["comments"][0].startswith("WT -- hard data")
table2 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=2)
assert table2.meta["initial_comments"][0].startswith("Swift")
assert table2.meta["comments"][0].startswith("WT -- hardness")
assert np.isclose(table2["MJD_nerr"][0], -2.37847222222222e-05)
def lowercase_header(value):
"""Make every non-comment line lower case."""
lines = []
for line in value.splitlines():
if not line.startswith("!"):
line = line.lower()
lines.append(line)
return "\n".join(lines)
@pytest.mark.parametrize("lowercase", [False, True])
def test_roundtrip(tmp_path, lowercase):
example_qdp = """
! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
! Columns are as labelled
READ TERR 1
READ SERR 2
! WT -- hard data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 NO 0.212439
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
NO NO NO NO NO
! WT -- soft data
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
NO NO NO NO NO
! WT -- hardness ratio
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
! Add command, just to raise the warning.
READ TERR 1
! WT -- whatever
!MJD Err (pos) Err(neg) Rate Error
53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
NO 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
"""
if lowercase:
example_qdp = lowercase_header(example_qdp)
path = str(tmp_path / "test.qdp")
path2 = str(tmp_path / "test2.qdp")
with open(path, "w") as fp:
print(example_qdp, file=fp)
with pytest.warns(AstropyUserWarning) as record:
table = _read_table_qdp(path, names=["MJD", "Rate"], table_id=0)
assert np.any(
[
"This file contains multiple command blocks" in r.message.args[0]
for r in record
]
)
_write_table_qdp(table, path2)
new_table = _read_table_qdp(path2, names=["MJD", "Rate"], table_id=0)
for col in new_table.colnames:
is_masked = np.array([np.ma.is_masked(val) for val in new_table[col]])
if np.any(is_masked):
# All values masked in the round-tripped table were also masked in the original.
assert np.ma.is_masked(table[col][is_masked])
is_nan = np.array(
[(not np.ma.is_masked(val) and np.isnan(val)) for val in new_table[col]]
)
# All non-NaN values are the same
assert np.allclose(new_table[col][~is_nan], table[col][~is_nan])
if np.any(is_nan):
# All NaN values are read as such.
assert np.isnan(table[col][is_nan])
assert np.allclose(new_table["MJD_perr"], [2.378472e-05, 1.1446759e-05])
for meta_name in ["initial_comments", "comments"]:
assert meta_name in new_table.meta
def test_read_example():
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b c ce d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b c ce d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
"""
dat = ascii.read(example_qdp, format="qdp", table_id=1, names=["a", "b", "c", "d"])
t = Table.read(
example_qdp, format="ascii.qdp", table_id=1, names=["a", "b", "c", "d"]
)
assert np.allclose(t["a"], [54000, 55000])
assert t["c_err"][0] == 5.5
assert np.ma.is_masked(t["b"][0])
assert np.isnan(t["d"][1])
for col1, col2 in zip(t.itercols(), dat.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example(tmp_path):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b c ce d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b c ce d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
"""
test_file = tmp_path / "test.qdp"
t = Table.read(
example_qdp, format="ascii.qdp", table_id=1, names=["a", "b", "c", "d"]
)
t.write(test_file, err_specs={"terr": [1], "serr": [3]})
t2 = Table.read(test_file, names=["a", "b", "c", "d"], table_id=0)
for col1, col2 in zip(t.itercols(), t2.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example_comma(tmp_path):
example_qdp = """
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a,a(pos),a(neg),b,c,ce,d
53000.5,0.25,-0.5,1,1.5,3.5,2
54000.5,1.25,-1.5,2,2.5,4.5,3
NO,NO,NO,NO,NO
! Table 1 comment
!a,a(pos),a(neg),b,c,ce,d
54000.5,2.25,-2.5,NO,3.5,5.5,5
55000.5,3.25,-3.5,4,4.5,6.5,nan
"""
test_file = tmp_path / "test.qdp"
t = Table.read(
example_qdp, format="ascii.qdp", table_id=1, names=["a", "b", "c", "d"], sep=","
)
t.write(test_file, err_specs={"terr": [1], "serr": [3]})
t2 = Table.read(test_file, names=["a", "b", "c", "d"], table_id=0)
# t.values_equal(t2)
for col1, col2 in zip(t.itercols(), t2.itercols()):
assert np.allclose(col1, col2, equal_nan=True)
def test_read_write_simple(tmp_path):
test_file = tmp_path / "test.qdp"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3, 4]))
t1.add_column(
MaskedColumn(
data=[4.0, np.nan, 3.0, 1.0], name="b", mask=[False, False, False, True]
)
)
t1.write(test_file, format="ascii.qdp")
with pytest.warns(UserWarning) as record:
t2 = Table.read(test_file, format="ascii.qdp")
assert np.any(
[
"table_id not specified. Reading the first available table"
in r.message.args[0]
for r in record
]
)
assert np.allclose(t2["col1"], t1["a"])
assert np.all(t2["col1"] == t1["a"])
good = ~np.isnan(t1["b"])
assert np.allclose(t2["col2"][good], t1["b"][good])
def test_read_write_simple_specify_name(tmp_path):
test_file = tmp_path / "test.qdp"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
# Write with default settings, then read back specifying the column name.
t1.write(test_file, format="ascii.qdp")
t2 = Table.read(test_file, table_id=0, format="ascii.qdp", names=["a"])
assert np.all(t2["a"] == t1["a"])
def test_get_lines_from_qdp(tmp_path):
test_file = str(tmp_path / "test.qdp")
text_string = "A\nB"
text_output = _get_lines_from_file(text_string)
with open(test_file, "w") as fobj:
print(text_string, file=fobj)
file_output = _get_lines_from_file(test_file)
list_output = _get_lines_from_file(["A", "B"])
for i, line in enumerate(["A", "B"]):
assert file_output[i] == line
assert list_output[i] == line
assert text_output[i] == line
|
a494da99e2c04cf7f4356caf9011041862c60b0cd97a07a3d25d5be6c25f03df | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
Angle,
CartesianRepresentation,
EarthLocation,
Latitude,
Longitude,
SkyCoord,
SphericalCosLatDifferential,
SphericalRepresentation,
)
from astropy.io.misc.parquet import get_pyarrow, parquet_identify
from astropy.table import Column, NdarrayMixin, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.time import Time, TimeDelta
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.compat.optional_deps import HAS_PANDAS
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
# Skip all tests in this file if we cannot import pyarrow
pyarrow = pytest.importorskip("pyarrow")
ALL_DTYPES = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
np.bool_,
"|S3",
"U3",
]
BIGENDIAN_DTYPES = [
np.dtype(">i4"),
np.dtype(">i8"),
np.dtype(">f4"),
np.dtype(">f8"),
]
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == "|S3":
return [b"abc", b"def", b"ghi"]
elif dtype == "U3":
return ["abc", "def", "ghi"]
else:
return [1, 2, 3]
def _default_array_values(dtype):
values = _default_values(dtype)
return [values for i in range(3)]
def _default_var_length_array_values(dtype):
values = _default_values(dtype)
return [
[
values[0],
],
[
values[0],
values[1],
],
[
values[0],
values[1],
values[2],
],
]
def test_read_write_simple(tmp_path):
"""Test writing/reading a simple parquet file."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_write_existing(tmp_path):
"""Test writing an existing file without overwriting."""
test_file = tmp_path / "test.parquet"
with open(test_file, "w") as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file)
def test_read_write_existing_overwrite(tmp_path):
"""Test overwriting an existing file."""
test_file = tmp_path / "test.parquet"
with open(test_file, "w") as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_fileobj(tmp_path):
"""Test reading a file object."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
import io
with io.FileIO(test_file, mode="r") as input_file:
t2 = Table.read(input_file)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_pathlikeobj(tmp_path):
"""Test reading a path-like object."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
import pathlib
p = pathlib.Path(test_file)
t2 = Table.read(p)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_wrong_fileobj():
"""Test reading an incorrect fileobject type."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
with pytest.raises(
TypeError, match="pyarrow can only open path-like or file-like objects."
):
Table.read(f, format="parquet")
def test_identify_wrong_fileobj():
"""Test identifying an incorrect fileobj."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
assert not parquet_identify("test", "test", f)
def test_identify_file_wrong_extension():
"""Test identifying an incorrect extension."""
assert not parquet_identify("test", "test.notparquet", None)
def test_identify_file_correct_extension():
"""Test identifying an incorrect extension."""
assert parquet_identify("test", "test.parquet", None)
assert parquet_identify("test", "test.parq", None)
def test_identify_file_noobject_nopath():
"""Test running identify with no object or path."""
assert not parquet_identify("test", None, None)
def test_write_wrong_type():
"""Test writing to a filename of the wrong type."""
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(TypeError, match="should be a string"):
t1.write(1212, format="parquet")
@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_dtypes(tmp_path, dtype):
"""Test that round-tripping a single column preserves datatypes."""
test_file = tmp_path / "test.parquet"
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == values)
assert t2["a"].dtype == dtype
@pytest.mark.parametrize("dtype", BIGENDIAN_DTYPES)
def test_preserve_single_bigendian_dtypes(tmp_path, dtype):
"""Test that round-tripping a single big-endian column preserves data."""
test_file = tmp_path / "test.parquet"
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == values)
# The parquet serialization will turn all arrays into little-endian.
assert t2["a"].dtype == dtype.newbyteorder("<")
@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_array_dtypes(tmp_path, dtype):
"""Test that round-tripping a single array column preserves datatypes."""
test_file = tmp_path / "test.parquet"
values = _default_array_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == t1["a"])
assert np.all(t2["a"].shape == np.array(values).shape)
assert t2["a"].dtype == dtype
@pytest.mark.parametrize("dtype", BIGENDIAN_DTYPES)
def test_preserve_single_bigendian_array_dtypes(tmp_path, dtype):
"""Test that round-tripping a single array column (big-endian) preserves data."""
test_file = tmp_path / "test.parquet"
values = _default_array_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == t1["a"])
assert np.all(t2["a"].shape == np.array(values).shape)
assert t2["a"].dtype == dtype.newbyteorder("<")
@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_var_length_array_dtypes(tmp_path, dtype):
"""
Test that round-tripping a single variable length array column preserves
datatypes.
"""
test_file = tmp_path / "test.parquet"
values = _default_var_length_array_values(dtype)
t1 = Table()
data = np.array([np.array(val, dtype=dtype) for val in values], dtype=np.object_)
t1.add_column(Column(name="a", data=data))
t1.write(test_file)
t2 = Table.read(test_file)
for row1, row2 in zip(t1["a"], t2["a"]):
assert np.all(row1 == row2)
assert row1.dtype == row2.dtype
@pytest.mark.parametrize("dtype", BIGENDIAN_DTYPES)
def test_preserve_single_bigendian_var_length_array_dtypes(tmp_path, dtype):
"""
Test that round-tripping a single big-endian variable length array column preserves
datatypes.
"""
test_file = tmp_path / "test.parquet"
values = _default_var_length_array_values(dtype)
t1 = Table()
data = np.array([np.array(val, dtype=dtype) for val in values], dtype=np.object_)
t1.add_column(Column(name="a", data=data))
t1.write(test_file)
t2 = Table.read(test_file)
for row1, row2 in zip(t1["a"], t2["a"]):
assert np.all(row1 == row2)
assert row1.dtype.newbyteorder(">") == row2.dtype.newbyteorder(">")
def test_preserve_all_dtypes(tmp_path):
"""Test that round-tripping preserves a table with all the datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
arr_values = _default_array_values(dtype)
t1.add_column(
Column(name=str(dtype) + "_arr", data=np.array(arr_values, dtype=dtype))
)
t1.write(test_file)
t2 = Table.read(test_file)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
arr_values = _default_array_values(dtype)
assert np.all(t2[str(dtype) + "_arr"] == values)
assert t2[str(dtype)].dtype == dtype
assert np.all(t2[str(dtype) + "_arr"].shape == np.array(arr_values).shape)
# Test just reading the schema
schema2 = Table.read(test_file, schema_only=True)
assert len(schema2) == 0
assert schema2.dtype == t2.dtype
def test_preserve_all_var_length_dtypes(tmp_path):
"""Test that round-tripping preserves a table with all the var length datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
varr_values = _default_var_length_array_values(dtype)
data = np.array(
[np.array(val, dtype=dtype) for val in varr_values], dtype=np.object_
)
t1.add_column(Column(name=str(dtype) + "_varr", data=data))
t1.write(test_file)
t2 = Table.read(test_file)
for dtype in ALL_DTYPES:
varr_values = _default_var_length_array_values(dtype)
colname = str(dtype) + "_varr"
for row1, row2 in zip(t1[colname], t2[colname]):
assert np.all(row1 == row2)
assert row1.dtype == row2.dtype
def test_write_empty_tables(tmp_path):
"""Test that we can save an empty table with var length datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
arr_values = _default_array_values(dtype)
t1.add_column(
Column(name=str(dtype) + "_arr", data=np.array(arr_values, dtype=dtype))
)
# Write an empty table with values and arrays, and confirm it works.
data = np.zeros(0, dtype=t1.dtype)
t2 = Table(data=data)
t2.write(test_file)
t3 = Table.read(test_file)
assert t3.dtype == t2.dtype
test_file2 = tmp_path / "test2.parquet"
t4 = Table()
for dtype in ALL_DTYPES:
varr_values = _default_var_length_array_values(dtype)
data = np.array(
[np.array(val, dtype=dtype) for val in varr_values], dtype=np.object_
)
t4.add_column(Column(name=str(dtype) + "_varr", data=data))
# Write an empty table with variable-length arrays, and confirm this
# raises an exception. (The datatype of an np.object_ type column
# cannot be inferred from an empty table.)
data = np.zeros(0, dtype=t4.dtype)
t5 = Table(data=data)
with pytest.raises(ValueError, match="Cannot serialize zero-length table") as err:
t5.write(test_file2)
def test_heterogeneous_var_array_table(tmp_path):
"""Test exception when trying to serialize a mixed-type variable-length column."""
test_file = tmp_path / "test.parquet"
t1 = Table()
data = np.array(
[
np.array([0, 1, 2], dtype=np.int32),
np.array([0, 1, 2, 3, 4], dtype=np.float64),
],
dtype=np.object_,
)
t1.add_column(Column(name="a", data=data))
with pytest.raises(ValueError, match="Cannot serialize mixed-type column") as err:
t1.write(test_file)
def test_preserve_meta(tmp_path):
"""Test that writing/reading preserves metadata."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["a"] = 1
t1.meta["b"] = "hello"
t1.meta["c"] = 3.14159
t1.meta["d"] = True
t1.meta["e"] = np.array([1, 2, 3])
t1.write(test_file)
t2 = Table.read(test_file)
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
def test_preserve_serialized(tmp_path):
"""Test that writing/reading preserves unit/format/description."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
def test_metadata_very_large(tmp_path):
"""Test that very large datasets work"""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2**16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2**18)
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
def test_fail_meta_serialize(tmp_path):
"""Test that we cannot preserve objects in metadata."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["f"] = str
with pytest.raises(Exception) as err:
t1.write(test_file)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
"""Convenient routine to check objects and attributes match."""
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of an empty OrderedDict(); #6720 would
# fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
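# A minimal usage sketch (added for illustration, not part of the original test
# module): ``assert_objects_equal`` checks the listed attributes plus the dotted
# ``info.*`` attributes it always appends.
def _example_assert_objects_equal():
    c1 = Column([1.0, 2.0], name="a", unit="m")
    c2 = Column([1.0, 2.0], name="a", unit="m")
    assert_objects_equal(c1, c2, ["data"])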
# Testing Parquet table read/write with mixins. This is mostly
# copied from HDF5/FITS mixin testing, and it might be good to unify it.
# Analogous tests also exist for ECSV.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 1 * u.kpc)
cr = CartesianRepresentation([0, 1] * u.pc, [4, 5] * u.pc, [8, 6] * u.pc)
sd = SphericalCosLatDifferential(
[0, 1] * u.mas / u.yr, [0, 1] * u.mas / u.yr, 10 * u.km / u.s
)
srd = SphericalRepresentation(sr, differentials=sd)
sc = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5")
scd = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,m",
frame="fk4",
obstime=["J1990.5", "J1991.5"],
)
scdc = scd.copy()
scdc.representation_type = "cartesian"
scpm = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
)
scpmrv = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
radial_velocity=[11, 12] * u.km / u.s,
)
scrv = SkyCoord(
[1, 2], [3, 4], [5, 6], unit="deg,deg,pc", radial_velocity=[11, 12] * u.km / u.s
)
tm = Time([2450814.5, 2450815.5], format="jd", scale="tai", location=el)
# NOTE: in the test below the name of the column "x" for the Quantity is
# important since it tests the fix for #10215 (namespace clash, where "x"
# clashes with "el2.x").
mixin_cols = {
"tm": tm,
"dt": TimeDelta([1, 2] * u.day),
"sc": sc,
"scd": scd,
"scdc": scdc,
"scpm": scpm,
"scpmrv": scpmrv,
"scrv": scrv,
"x": [1, 2] * u.m,
"qdb": [10, 20] * u.dB(u.mW),
"qdex": [4.5, 5.5] * u.dex(u.cm / u.s**2),
"qmag": [21, 22] * u.ABmag,
"lat": Latitude([1, 2] * u.deg),
"lon": Longitude([1, 2] * u.deg, wrap_angle=180.0 * u.deg),
"ang": Angle([1, 2] * u.deg),
"el2": el2,
"sr": sr,
"cr": cr,
"sd": sd,
"srd": srd,
}
time_attrs = ["value", "shape", "format", "scale", "location"]
compare_attrs = {
"c1": ["data"],
"c2": ["data"],
"tm": time_attrs,
"dt": ["shape", "value", "format", "scale"],
"sc": ["ra", "dec", "representation_type", "frame.name"],
"scd": ["ra", "dec", "distance", "representation_type", "frame.name"],
"scdc": ["x", "y", "z", "representation_type", "frame.name"],
"scpm": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"representation_type",
"frame.name",
],
"scpmrv": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"radial_velocity",
"representation_type",
"frame.name",
],
"scrv": [
"ra",
"dec",
"distance",
"radial_velocity",
"representation_type",
"frame.name",
],
"x": ["value", "unit"],
"qdb": ["value", "unit"],
"qdex": ["value", "unit"],
"qmag": ["value", "unit"],
"lon": ["value", "unit", "wrap_angle"],
"lat": ["value", "unit"],
"ang": ["value", "unit"],
"el2": ["x", "y", "z", "ellipsoid"],
"nd": ["x", "y", "z"],
"sr": ["lon", "lat", "distance"],
"cr": ["x", "y", "z"],
"sd": ["d_lon_coslat", "d_lat", "d_distance"],
"srd": [
"lon",
"lat",
"distance",
"differentials.s.d_lon_coslat",
"differentials.s.d_lat",
"differentials.s.d_distance",
],
}
def test_parquet_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="parquet")
t2 = Table.read(filename, format="parquet")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet")
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.parquet"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
if isinstance(t[name], NdarrayMixin):
pytest.xfail("NdarrayMixin not supported")
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet")
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmp_path):
"""Test round-trip of MaskedColumn through Parquet using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.parquet"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"] = [b"c", b"d", b"e"]
t["c"].mask[1] = True
t.write(filename, format="parquet")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_one_name(table_cls, tmp_path):
"""Test write all cols at once, and read one at a time."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
for name in names:
t2 = table_cls.read(filename, format="parquet", include_names=[name])
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t2.colnames == [name]
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_exclude_names(table_cls, tmp_path):
"""Test write all cols at once, and read all but one at a time."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", exclude_names=names[0:5])
assert t.colnames[5:] == t2.colnames
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_no_columns(table_cls, tmp_path):
"""Test write all cols at once, and try to read no valid columns."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
with pytest.raises(ValueError, match="No include_names specified"):
t2 = table_cls.read(
filename,
format="parquet",
include_names=["not_a_column", "also_not_a_column"],
)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_schema(table_cls, tmp_path):
"""Test write all cols at once, and read the schema."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", schema_only=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
assert len(t2) == 0
def test_parquet_filter(tmp_path):
"""Test reading a parquet file with a filter."""
filename = tmp_path / "test_simple.parquet"
t1 = Table()
t1["a"] = Column(data=np.arange(100), dtype=np.int32)
t1["b"] = Column(data=np.arange(100, 0, -1), dtype=np.float64)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, filters=[("a", "<", 50)])
assert t2["a"].max() < 50
t2 = Table.read(filename, filters=[("b", "<", 50)])
assert t2["b"].max() < 50
def test_parquet_read_generic(tmp_path):
"""Test reading a generic parquet file."""
filename = tmp_path / "test_generic.parq"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
# Write the table generically via pyarrow.parquet
names = t1.dtype.names
type_list = [
(name, pyarrow.from_numpy_dtype(t1[name].dtype.type)) for name in names
]
schema = pyarrow.schema(type_list)
_, parquet, writer_version = get_pyarrow()
# We use version='2.0' for full support of datatypes including uint32.
with parquet.ParquetWriter(filename, schema, version=writer_version) as writer:
arrays = [pyarrow.array(t1[name].data) for name in names]
writer.write_table(pyarrow.Table.from_arrays(arrays, schema=schema))
with pytest.warns(AstropyUserWarning, match="No table::len"):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas")
def test_parquet_read_pandas(tmp_path):
"""Test reading a pandas parquet file."""
filename = tmp_path / "test_pandas.parq"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
df = t1.to_pandas()
# We use version='2.0' for full support of datatypes including uint32.
_, _, writer_version = get_pyarrow()
df.to_parquet(filename, version=writer_version)
with pytest.warns(AstropyUserWarning, match="No table::len"):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
|
526ad9f93f0e746d5bbc180bbce3f63cce71c2eb7063b282e7721920d226afe5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Comparison functions for `astropy.cosmology.Cosmology`.
This module is **NOT** public API. To use these functions, import them from
the top-level namespace -- :mod:`astropy.cosmology`.
This module will be moved.
"""
from __future__ import annotations
import functools
import inspect
from typing import Any, Callable, Union
import numpy as np
from numpy import False_, True_, ndarray
from astropy import table
from astropy.cosmology.core import Cosmology
__all__ = [] # Nothing is scoped here
##############################################################################
# PARAMETERS
_FormatType = Union[bool, None, str]
_FormatsT = Union[_FormatType, tuple[_FormatType, ...]]
_CompFnT = Callable[[Any, _FormatType], Cosmology]
_COSMO_AOK: set[Any] = {None, True_, False_, "astropy.cosmology"}
# The numpy bool also catches real bool for ops "==" and "in"
##############################################################################
# UTILITIES
class _CosmologyWrapper:
"""
A private wrapper class to hide things from :mod:`numpy`.
This should never be exposed to the user.
"""
__slots__ = ("wrapped",)
# Use less memory and speed up initialization.
_cantbroadcast: tuple[type, ...] = (table.Row, table.Table)
"""
Have to deal with things that do not broadcast well. e.g.
`~astropy.table.Row` cannot be used in an array, even if ``dtype=object``
and will raise a segfault when used in a `numpy.ufunc`.
"""
wrapped: Any
def __init__(self, wrapped: Any) -> None:
self.wrapped = wrapped
# TODO! when py3.9+ use @functools.partial(np.frompyfunc, nin=2, nout=1)
# TODO! https://github.com/numpy/numpy/issues/9477 segfaults on astropy.row
# and np.vectorize can't coerce table to dtypes
def _wrap_to_ufunc(nin: int, nout: int) -> Callable[[_CompFnT], np.ufunc]:
def wrapper(pyfunc: _CompFnT) -> np.ufunc:
ufunc = np.frompyfunc(pyfunc, 2, 1)
return ufunc
return wrapper
@_wrap_to_ufunc(2, 1)
def _parse_format(cosmo: Any, format: _FormatType, /) -> Cosmology:
"""Parse Cosmology-like input into Cosmologies, given a format hint.
Parameters
----------
cosmo : |Cosmology|-like, positional-only
|Cosmology| to parse.
format : bool or None or str, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
|Cosmology| or generator thereof
Raises
------
TypeError
If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
ValueError
If ``cosmo`` is a |Cosmology| and ``format`` is not one of `None`, `True`,
`False`, or ``"astropy.cosmology"``.
"""
# Deal with private wrapper
if isinstance(cosmo, _CosmologyWrapper):
cosmo = cosmo.wrapped
# Shortcut if already a cosmology
if isinstance(cosmo, Cosmology):
if format not in _COSMO_AOK:
allowed = "/".join(map(str, _COSMO_AOK))
raise ValueError(
f"for parsing a Cosmology, 'format' must be {allowed}, not {format}"
)
return cosmo
# Convert, if allowed.
elif format == False_: # catches False and False_
raise TypeError(
f"if 'format' is False, arguments must be a Cosmology, not {cosmo}"
)
else:
format = None if format == True_ else format # str->str, None/True/True_->None
out = Cosmology.from_format(cosmo, format=format) # this can error!
return out
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
``format`` is broadcast to match the shape of the cosmology arguments. Note
that the cosmology arguments are not broadcast against ``format``, so it
cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by the corresponding ``format``.
format : bool or None or str or array-like thereof, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting. Note ``format`` is broadcast as an object array to match the
shape of ``cosmos`` so ``format`` cannot determine the output shape.
Raises
------
TypeError
If any in ``cosmos`` is not a |Cosmology| and the corresponding
``format`` equals `False`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
towrap = (isinstance(cosmo, _CosmologyWrapper._cantbroadcast) for cosmo in cosmos)
wcosmos = [
c if not wrap else _CosmologyWrapper(c) for c, wrap in zip(cosmos, towrap)
]
return _parse_format(wcosmos, formats)
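# Illustrative sketch (not part of the original module): ``format`` is broadcast
# element-wise over the inputs, so a different conversion hint can be supplied
# for each argument. ``Planck18`` and its "mapping" representation are used here
# purely as an example.
def _example_parse_formats():
    from astropy.cosmology import Planck18

    mapping = Planck18.to_format("mapping")  # dict-like view of the cosmology
    # The first argument is already a Cosmology (no conversion allowed); the
    # second is auto-identified as the "mapping" format and converted back.
    return _parse_formats(Planck18, mapping, format=[False, True])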
def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
"""
sig = inspect.signature(pyfunc)
nin = sum(p.kind == 0 for p in sig.parameters.values())  # positional-only parameters
# Make wrapper function that parses cosmology-like inputs
@functools.wraps(pyfunc)
def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool:
if len(cosmos) > nin:
raise TypeError(
f"{wrapper.__wrapped__.__name__} takes {nin} positional"
f" arguments but {len(cosmos)} were given"
)
# Parse cosmologies to format. Only do specified number.
cosmos = _parse_formats(*cosmos, format=format)
# Evaluate pyfunc, erroring if the argument count didn't match.
result = wrapper.__wrapped__(*cosmos, **kwargs)
# Return, casting to the correct type where possible.
return result
return wrapper
##############################################################################
# COMPARISON FUNCTIONS
@_comparison_decorator
def cosmology_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise equality check on the cosmologies.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
Examples
--------
Assuming the following imports
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
Two identical cosmologies are equal.
>>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmology_equal(cosmo1, cosmo2)
True
And cosmologies with different parameters are not.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
>>> cosmology_equal(cosmo1, cosmo3)
False
Two cosmologies may be equivalent even if not of the same class. In these
examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
to the same value calculated in |FlatLambdaCDM|.
>>> from astropy.cosmology import LambdaCDM
>>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmology_equal(cosmo1, cosmo3)
False
>>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
False
Also, using the keyword argument, the notion of equality is extended to any
Python object that can be converted to a |Cosmology|.
>>> mapping = cosmo2.to_format("mapping")
>>> cosmology_equal(cosmo1, mapping, format=True)
True
Either (or both) arguments can be |Cosmology|-like.
>>> cosmology_equal(mapping, cosmo2, format=True)
True
The list of valid formats, e.g. |Table|, may be checked with
``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of these
kinds can still be checked for equality, but the correct format string must
be used.
>>> yml = cosmo2.to_format("yaml")
>>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
True
This also works with an array of ``format`` matching the number of
cosmologies.
>>> cosmology_equal(mapping, yml, format=[True, "yaml"])
True
"""
# Check parameter equality
if not allow_equivalent:
eq = cosmo1 == cosmo2
else:
# Check parameter equivalence
# The options are: 1) same class & parameters; 2) same class, different
# parameters; 3) different classes, equivalent parameters; 4) different
# classes, different parameters. (1) & (3) => True, (2) & (4) => False.
eq = cosmo1.__equiv__(cosmo2)
if eq is NotImplemented:
eq = cosmo2.__equiv__(cosmo1) # that failed, try from 'other'
eq = False if eq is NotImplemented else eq
# TODO! include equality check of metadata
return eq
@_comparison_decorator
def _cosmology_not_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise cosmology non-equality check.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. ``format`` is
broadcast to match the shape of the cosmology arguments. Note that the
cosmology arguments are not broadcast against ``format``, so it cannot
determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
See Also
--------
astropy.cosmology.cosmology_equal
Element-wise equality check, with argument conversion to Cosmology.
"""
neq = not cosmology_equal(cosmo1, cosmo2, allow_equivalent=allow_equivalent)
# TODO! it might eventually be worth the speed boost to implement some of
# the internals of cosmology_equal here, but for now it's a hassle.
return neq
|
34fceab0d78311e19ea6e770f4944093028073786a5beb1933450876afeb3d03 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number
from typing import TYPE_CHECKING, Any, TypeVar
import numpy as np
from numpy import inf, sin
import astropy.constants as const
import astropy.units as u
from astropy.cosmology.core import Cosmology, FlatCosmologyMixin
from astropy.cosmology.parameter import (
Parameter,
_validate_non_negative,
_validate_with_unit,
)
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["FLRW", "FlatFLRWMixin"]
__doctest_requires__ = {"*": ["scipy"]}
if TYPE_CHECKING:
from collections.abc import Mapping
# isort: split
if HAS_SCIPY:
from scipy.integrate import quad
else:
def quad(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.integrate'")
##############################################################################
# Parameters
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
_H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
_sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
_critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
_radian_in_arcsec = (1 * u.rad).to(u.arcsec)
_radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
_a_B_c2 = (4 * const.sigma_sb / const.c**3).cgs.value
# Boltzmann constant in eV / K
_kB_evK = const.k_B.to(u.eV / u.K)
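# Illustrative check (comment only, not part of the module): for H0 = 70 km/s/Mpc,
#
#     H0_s = 70 * _H0units_to_invs        # Hubble constant in 1/s
#     rho_c = _critdens_const * H0_s**2   # critical density, roughly 9.2e-30 g cm^-3
#
# which is the same computation performed in ``FLRW.__init__`` below.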
# typing
_FLRWT = TypeVar("_FLRWT", bound="FLRW")
_FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin")
##############################################################################
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
H0 = Parameter(
doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)",
fvalidate="scalar",
)
Om0 = Parameter(
doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative",
)
Ode0 = Parameter(
doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float",
)
Tcmb0 = Parameter(
doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin",
fvalidate="scalar",
)
Neff = Parameter(
doc="Number of effective neutrino species.", fvalidate="non-negative"
)
m_nu = Parameter(
doc="Mass of neutrino species.", unit="eV", equivalencies=u.mass_energy()
)
Ob0 = Parameter(
doc="Omega baryon; baryonic matter density/critical density at z=0."
)
def __init__(
self,
H0,
Om0,
Ode0,
Tcmb0=0.0 * u.K,
Neff=3.04,
m_nu=0.0 * u.eV,
Ob0=None,
*,
name=None,
meta=None,
):
super().__init__(name=name, meta=meta)
# Assign (and validate) Parameters
self.H0 = H0
self.Om0 = Om0
self.Ode0 = Ode0
self.Tcmb0 = Tcmb0
self.Neff = Neff
self.m_nu = m_nu # (reset later, this is just for unit validation)
self.Ob0 = Ob0 # (must be after Om0)
# Derived quantities:
# Dark matter density; matter - baryons, if latter is not None.
self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.0
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.value * _H0units_to_invs
# Hubble time
self._hubble_time = (_sec_to_Gyr / H0_s) << u.Gyr
# Critical density at z=0 (grams per cubic cm)
cd0value = _critdens_const * H0_s**2
self._critical_density0 = cd0value << u.g / u.cm**3
# Compute photon density from Tcmb
self._Ogamma0 = _a_B_c2 * self._Tcmb0.value**4 / self._critical_density0.value
# Compute Neutrino temperature:
# The constant in front is (4/11)^1/3 -- see any cosmology book for an
# explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute neutrino parameters:
if self._m_nu is None:
self._nneutrinos = 0
self._neff_per_nu = None
self._massivenu = False
self._massivenu_mass = None
self._nmassivenu = self._nmasslessnu = None
else:
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally. In
# detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends on
# the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering
# sterile neutrinos).
self._neff_per_nu = self._Neff / self._nneutrinos
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
massive = np.nonzero(self._m_nu.value > 0)[0]
self._massivenu = massive.size > 0
self._nmassivenu = len(massive)
self._massivenu_mass = (
self._m_nu[massive].value if self._massivenu else None
)
self._nmasslessnu = self._nneutrinos - self._nmassivenu
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if self._massivenu: # (`_massivenu` set in `m_nu`)
nu_y = self._massivenu_mass / (_kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
# This case is particularly simple, so do it directly. The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
self._nu_y = self._nu_y_list = None
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
# ---------------------------------------------------------------
# Parameter details
@Ob0.validator
def Ob0(self, param, value):
"""Validate baryon density to None or positive float > matter density."""
if value is None:
return value
value = _validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError(
"baryonic density can not be larger than total matter density."
)
return value
@m_nu.validator
def m_nu(self, param, value):
"""Validate neutrino masses to right value, units, and shape.
There are no neutrinos if floor(Neff) or Tcmb0 are 0.
The number of neutrinos must match floor(Neff).
Neutrino masses cannot be negative.
"""
# Check if there are any neutrinos
if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = _validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (nneutrinos,)):
raise ValueError(
"unexpected number of neutrino masses — "
f"expected {nneutrinos}, got {len(value)}."
)
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=nneutrinos)
return value
# ---------------------------------------------------------------
# properties
@property
def is_flat(self):
"""Return bool; `True` if the cosmology is flat."""
return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
@property
def Odm0(self):
"""Omega dark matter; dark matter density/critical density at z=0."""
return self._Odm0
@property
def Ok0(self):
"""Omega curvature; the effective curvature density/critical density at z=0."""
return self._Ok0
@property
def Tnu0(self):
"""
Temperature of the neutrino background as `~astropy.units.Quantity` at z=0.
"""
return self._Tnu0
@property
def has_massive_nu(self):
"""Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def h(self):
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self._h
@property
def hubble_time(self):
"""Hubble time as `~astropy.units.Quantity`."""
return self._hubble_time
@property
def hubble_distance(self):
"""Hubble distance as `~astropy.units.Quantity`."""
return self._hubble_distance
@property
def critical_density0(self):
"""Critical density as `~astropy.units.Quantity` at z=0."""
return self._critical_density0
@property
def Ogamma0(self):
"""Omega gamma; the density/critical density of photons at z=0."""
return self._Ogamma0
@property
def Onu0(self):
"""Omega nu; the density/critical density of neutrinos at z=0."""
return self._Onu0
# ---------------------------------------------------------------
@abstractmethod
def w(self, z):
r"""The dark energy equation of state.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
`float` if scalar input.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError(
"Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density"
)
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
The density of dark energy relative to the critical density at each
redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
def nu_relative_density(self, z):
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
f : ndarray or float
The neutrino density scaling factor relative to the density in
photons at each redshift.
Returns `float` if the input is scalar.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._massivenu:
return (
prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
curr_nu_y = self._nu_y / (1.0 + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
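# Illustrative sketch of the fitting function used above (the constants are
# copied from this method, not an external reference): at y = 0 the
# per-species factor reduces to 1, matching the stated f(0) = 1 limit.
#
#     >>> p, invp, k = 1.83, 1.0 / 1.83, 0.3173
#     >>> y = 0.0                      # m_nu * a / T_nu0 for a massless species
#     >>> (1.0 + (k * y) ** p) ** invp
#     1.0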
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
The actual integral used is rewritten from [1]_ to be in terms of z.
It will generally be more efficient for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
z = aszarr(z)
if not isinstance(z, (Number, np.generic)): # array/Quantity
ival = np.array(
[quad(self._w_integrand, 0, log(1 + redshift))[0] for redshift in z]
)
return np.exp(3 * ival)
else: # scalar
ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
return exp(3 * ival)
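# Illustrative sketch (standalone, hypothetical constant-w model) of why an
# analytic overload is worthwhile: for a constant equation of state w, the
# integral above collapses to I(z) = (1 + z)**(3 * (1 + w)), which the numeric
# quadrature reproduces.
#
#     >>> from math import exp, log
#     >>> from scipy.integrate import quad
#     >>> w = -0.9
#     >>> ival = quad(lambda ln1pz: 1.0 + w, 0, log(1 + 1.5))[0]
#     >>> abs(exp(3 * ival) - (1 + 1.5) ** (3 * (1 + w))) < 1e-10
#     True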
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * self.de_density_scale(z)
)
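# Illustrative sketch (toy flat LambdaCDM with radiation neglected,
# hypothetical parameter values) of the scaling computed above:
# E(z) = sqrt(Om0 * (1 + z)**3 + Ode0), so H(z) = H0 * E(z) and E(0) = 1
# when the density parameters sum to unity.
#
#     >>> from math import sqrt
#     >>> Om0, Ode0 = 0.3, 0.7
#     >>> E = lambda z: sqrt(Om0 * (1 + z) ** 3 + Ode0)
#     >>> E(0.0)
#     1.0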
def inv_efunc(self, z):
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the inverse Hubble constant.
Returns `float` if the input is scalar.
"""
# Avoid the function overhead by repeating code
Or = self._Ogamma0 + (
self._Onu0
if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (
zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0)
+ self._Ode0 * self.de_density_scale(z)
) ** (-0.5)
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : float
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z, /):
"""Lookback time to redshift ``z``. Value in units of Hubble time.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
Lookback time to each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._lookback_time_integrand_scalar, 0, z)[0]
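# Illustrative sketch (toy flat LambdaCDM, hypothetical parameters) of the
# dimensionless integral evaluated above:
# t_lookback / t_Hubble = integral_0^z dz' / ((1 + z') * E(z')),
# which for any positive redshift lies between 0 and 1 Hubble time.
#
#     >>> from math import sqrt
#     >>> from scipy.integrate import quad
#     >>> inv_E = lambda z: 1.0 / sqrt(0.3 * (1 + z) ** 3 + 0.7)
#     >>> tl = quad(lambda z: inv_E(z) / (1 + z), 0, 1.0)[0]
#     >>> 0.0 < tl < 1.0
#     True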
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply ``c * lookback_time``. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z, /):
"""Age of the universe at redshift ``z``. Value in units of Hubble time.
Calculated using explicit integration.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
The age of the universe at each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return quad(self._lookback_time_integrand_scalar, z, inf)[0]
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
"""
Comoving line-of-sight distance between objects at redshifts ``z1`` and
``z2``. Value in Mpc.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : float or ndarray
Comoving distance in Mpc between each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
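# Illustrative sketch (toy flat LambdaCDM, hypothetical H0 = 70 km/s/Mpc) of
# the quadrature above: the integral of 1/E(z) is dimensionless and becomes a
# distance once multiplied by the Hubble distance c / H0.
#
#     >>> from math import sqrt
#     >>> from scipy.integrate import quad
#     >>> inv_E = lambda z: 1.0 / sqrt(0.3 * (1 + z) ** 3 + 0.7)
#     >>> d_H = 299792.458 / 70.0                      # c / H0 in Mpc
#     >>> d_C = d_H * quad(inv_E, 0, 1.0)[0]           # comoving distance to z = 1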
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) # fmt: skip
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc between the input redshifts.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)
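# Illustrative sketch (standalone helper, hypothetical inputs) of the curvature
# handling above: the transverse distance uses sinh for open models (Ok0 > 0),
# sin for closed models (Ok0 < 0), and reduces to the line-of-sight comoving
# distance when flat.
#
#     >>> from math import sqrt, sinh, sin
#     >>> def transverse(d_c, d_h, Ok0):
#     ...     if Ok0 == 0:
#     ...         return d_c
#     ...     s = sqrt(abs(Ok0))
#     ...     return d_h / s * (sinh(s * d_c / d_h) if Ok0 > 0 else sin(s * d_c / d_h))
#     >>> transverse(3300.0, 4283.0, 0.0)
#     3300.0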
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
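# Illustrative sketch (assuming the bundled Planck18 instance) of the relation
# tying the distances defined above together:
# d_L = (1 + z) * d_M = (1 + z)**2 * d_A.
#
#     >>> import numpy as np
#     >>> from astropy.cosmology import Planck18
#     >>> z = 0.5
#     >>> dl = Planck18.luminosity_distance(z)
#     >>> da = Planck18.angular_diameter_distance(z)
#     >>> bool(np.isclose(dl.value, (1 + z) ** 2 * da.value))
#     True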
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
Returns scalar if the input is scalar, an array otherwise.
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(
f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).",
AstropyUserWarning,
)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z, /):
"""Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some cross section
of absorption and number density intersecting a sightline per unit
redshift path ([1]_, [2]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
.. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
distmod : `~astropy.units.Quantity`
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5.0 * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
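# Illustrative sketch (hypothetical distance value) of the "+ 25" term used
# above: with the luminosity distance in Mpc, 5*log10(d_L / 10 pc) equals
# 5*log10(d_L[Mpc]) + 25, because 1 Mpc = 1e6 pc.
#
#     >>> from math import log10
#     >>> d_L = 3000.0                                 # Mpc, illustrative only
#     >>> abs(5 * log10(d_L * 1e6 / 10.0) - (5 * log10(d_L) + 25.0)) < 1e-9
#     True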
def comoving_volume(self, z):
r"""Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less than
``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
`comoving_distance` but it is less intuitive if :math:`\Omega_k` is not.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4.0 * pi * dh**3 / (2.0 * Ok0) * u.Mpc**3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsin(term3))
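# Illustrative sketch (assuming the bundled flat Planck18 instance, for which
# Ok0 == 0) of the early-return branch above: the comoving volume is simply a
# Euclidean sphere of radius equal to the comoving distance.
#
#     >>> import numpy as np
#     >>> from astropy.cosmology import Planck18
#     >>> z = 1.0
#     >>> V = Planck18.comoving_volume(z).value
#     >>> d_C = Planck18.comoving_distance(z).value
#     >>> bool(np.isclose(V, 4.0 / 3.0 * np.pi * d_C ** 3))
#     True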
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm**2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / _radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
"""
return self.angular_diameter_distance(z).to(u.kpc) / _radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return _radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
"""
return _radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
"""
Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.
Must precede the base class in the multiple-inheritance order so that this
mixin's ``__init__`` runs before the base class's.
Note that all instances of ``FlatFLRWMixin`` are flat, but not all
flat cosmologies are instances of ``FlatFLRWMixin``. For example,
``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param.
def __init_subclass__(cls):
super().__init_subclass__()
if "Ode0" in cls._init_signature.parameters:
raise TypeError(
"subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`"
)
def __init__(self, *args, **kw):
super().__init__(*args, **kw) # guaranteed not to have `Ode0`
# Do some twiddling after the fact to get flatness
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
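# Consequence of the flatness adjustment above (illustrative, assuming the
# bundled Planck18 instance, which uses this mixin):
#
#     >>> from astropy.cosmology import Planck18
#     >>> Planck18.Ok0
#     0.0
#     >>> Planck18.Otot0
#     1.0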
@lazyproperty
def nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self.__nonflatclass__._init_signature.bind_partial(
**self._init_arguments, Ode0=self.Ode0
)
# Make new instance, respecting args vs kwargs
inst = self.__nonflatclass__(*ba.args, **ba.kwargs)
# Because of machine precision, make sure parameters exactly match
for n in inst.__all_parameters__ + ("Ok0",):
setattr(inst, "_" + n, getattr(self, n))
return inst
def clone(
self, *, meta: Mapping | None = None, to_nonflat: bool | None = None, **kwargs: Any
):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool or None (optional, keyword-only)
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone to the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
With 'to_nonflat' `True`, ``Ode0`` can be modified.
>>> Planck13.clone(to_nonflat=True, Ode0=1)
LambdaCDM(name="Planck13 (modified)", H0=67.77 km / (Mpc s),
Om0=0.30712, Ode0=1.0, ...
"""
return super().clone(meta=meta, to_nonflat=to_nonflat, **kwargs)
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return 1.0
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density parameter at each redshift, which is identically 1
for a flat cosmology. Returns `float` if the input is scalar.
"""
return (
1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)
)
|
461ee8017399414881dd7c1a37527eca85bc5c06b798d674a6fec68f29788780 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file is the main file used when running tests with pytest directly,
# in particular if running e.g. ``pytest docs/``.
import os
import tempfile
import hypothesis
from astropy import __version__
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
# This has to be in the root dir or it will not display in CI.
def pytest_configure(config):
PYTEST_HEADER_MODULES["PyERFA"] = "erfa"
PYTEST_HEADER_MODULES["Cython"] = "cython"
PYTEST_HEADER_MODULES["Scikit-image"] = "skimage"
PYTEST_HEADER_MODULES["pyarrow"] = "pyarrow"
PYTEST_HEADER_MODULES["asdf-astropy"] = "asdf_astropy"
TESTED_VERSIONS["Astropy"] = __version__
# This has to be in the root dir or it will not display in CI.
def pytest_report_header(config):
# This gets added after the pytest-astropy-header output.
return (
f'CI: {os.environ.get("CI", "undefined")}\n'
f'ARCH_ON_CI: {os.environ.get("ARCH_ON_CI", "undefined")}\n'
f'IS_CRON: {os.environ.get("IS_CRON", "undefined")}\n'
)
# Tell Hypothesis that we might be running slow tests, to print the seed blob
# so we can easily reproduce failures from CI, and derive a fuzzing profile
# to try many more inputs when we detect a scheduled build or when specifically
# requested using the HYPOTHESIS_PROFILE=fuzz environment variable or
# `pytest --hypothesis-profile=fuzz ...` argument.
hypothesis.settings.register_profile(
"ci", deadline=None, print_blob=True, derandomize=True
)
hypothesis.settings.register_profile(
"fuzzing", deadline=None, print_blob=True, max_examples=1000
)
default = (
"fuzzing"
if (
os.environ.get("IS_CRON") == "true"
and os.environ.get("ARCH_ON_CI") not in ("aarch64", "ppc64le")
)
else "ci"
)
hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", default))
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config")
os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache")
os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy"))
os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy"))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
|
96e3740aa5112028edfba18fcceb8f635927816a92c951e43f833fa7c8a73fed | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
import builtins
import os
import sys
import tempfile
import warnings
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
import pytest
from astropy import __version__
# This is needed to silence a warning from matplotlib caused by
# PyInstaller's matplotlib runtime hook. This can be removed once the
# issue is fixed upstream in PyInstaller, and only impacts us when running
# the tests from a PyInstaller bundle.
# See https://github.com/astropy/astropy/issues/10785
if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
# The above checks whether we are running in a PyInstaller bundle.
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*", category=UserWarning)
# Note: while the filterwarnings is required, this import has to come after the
# filterwarnings above, because this attempts to import matplotlib:
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib
matplotlibrc_cache = {}
@pytest.fixture
def ignore_matplotlibrc():
# This is a fixture for tests that use matplotlib but not pytest-mpl
# (which already handles rcParams)
from matplotlib import pyplot as plt
with plt.style.context({}, after_reset=True):
yield
@pytest.fixture
def fast_thread_switching():
"""Fixture that reduces thread switching interval.
This makes it easier to provoke race conditions.
"""
old = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
yield
sys.setswitchinterval(old)
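# A minimal sketch (hypothetical test, not part of this file) of how a test
# would request the fixture above to make thread races easier to provoke:
#
#     def test_concurrent_appends(fast_thread_switching):
#         import threading
#         results = []
#         threads = [threading.Thread(target=results.append, args=(i,)) for i in range(4)]
#         for t in threads:
#             t.start()
#         for t in threads:
#             t.join()
#         assert sorted(results) == [0, 1, 2, 3]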
def pytest_configure(config):
from astropy.utils.iers import conf as iers_conf
# Disable IERS auto download for testing
iers_conf.auto_download = False
builtins._pytest_running = True
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlibrc_cache.update(matplotlib.rcParams)
matplotlib.rcdefaults()
matplotlib.use("Agg")
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration. Note that this
# is also set in the test runner, but we need to also set it here for
# things to work properly in parallel mode
builtins._xdg_config_home_orig = os.environ.get("XDG_CONFIG_HOME")
builtins._xdg_cache_home_orig = os.environ.get("XDG_CACHE_HOME")
os.environ["XDG_CONFIG_HOME"] = tempfile.mkdtemp("astropy_config")
os.environ["XDG_CACHE_HOME"] = tempfile.mkdtemp("astropy_cache")
os.mkdir(os.path.join(os.environ["XDG_CONFIG_HOME"], "astropy"))
os.mkdir(os.path.join(os.environ["XDG_CACHE_HOME"], "astropy"))
config.option.astropy_header = True
PYTEST_HEADER_MODULES["PyERFA"] = "erfa"
PYTEST_HEADER_MODULES["Cython"] = "cython"
PYTEST_HEADER_MODULES["Scikit-image"] = "skimage"
PYTEST_HEADER_MODULES["asdf-astropy"] = "asdf_astropy"
TESTED_VERSIONS["Astropy"] = __version__
def pytest_unconfigure(config):
from astropy.utils.iers import conf as iers_conf
# Undo IERS auto download setting for testing
iers_conf.reset("auto_download")
builtins._pytest_running = False
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlib.rcParams.update(matplotlibrc_cache)
matplotlibrc_cache.clear()
if builtins._xdg_config_home_orig is None:
os.environ.pop("XDG_CONFIG_HOME")
else:
os.environ["XDG_CONFIG_HOME"] = builtins._xdg_config_home_orig
if builtins._xdg_cache_home_orig is None:
os.environ.pop("XDG_CACHE_HOME")
else:
os.environ["XDG_CACHE_HOME"] = builtins._xdg_cache_home_orig
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get("failed"):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
"Some tests may fail when run from the IPython prompt; "
"especially, but not limited to tests involving logging and warning "
"handling. Unless you are certain as to the cause of the failure, "
"please check that the failure occurs outside IPython as well. See "
"https://docs.astropy.org/en/stable/known_issues.html#failing-logging-"
"tests-when-running-the-tests-in-ipython for more information.",
yellow=True,
bold=True,
)
|
6658a18cc5085ae1d65e417e3e7a2284d5e920cf6403ff2f56d23d6e0008f70c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines a logging class based on the built-in logging module.
.. note::
This module is meant for internal ``astropy`` usage. For use in other
packages, we recommend implementing your own logger instead.
"""
import inspect
import logging
import os
import sys
import warnings
from contextlib import contextmanager
from . import conf as _conf
from . import config as _config
from .utils import find_current_module
from .utils.exceptions import AstropyUserWarning, AstropyWarning
__all__ = ["Conf", "conf", "log", "AstropyLogger", "LoggingError"]
# import the logging levels from logging so that one can do:
# log.setLevel(log.DEBUG), for example
logging_levels = [
"NOTSET",
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL",
"FATAL",
]
for level in logging_levels:
globals()[level] = getattr(logging, level)
__all__ += logging_levels
# Initialize by calling _init_log()
log = None
class LoggingError(Exception):
"""
This exception is for various errors that occur in the astropy logger,
typically when activating or deactivating logger-related features.
"""
class _AstLogIPYExc(Exception):
"""
An exception that is used only as a placeholder to indicate to the
IPython exception-catching mechanism that the astropy
exception-capturing is activated. It should not actually be used as
an exception anywhere.
"""
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.logger`.
"""
log_level = _config.ConfigItem(
"INFO",
"Threshold for the logging messages. Logging "
"messages that are less severe than this level "
"will be ignored. The levels are ``'DEBUG'``, "
"``'INFO'``, ``'WARNING'``, ``'ERROR'``.",
)
log_warnings = _config.ConfigItem(True, "Whether to log `warnings.warn` calls.")
log_exceptions = _config.ConfigItem(
False, "Whether to log exceptions before raising them."
)
log_to_file = _config.ConfigItem(
False, "Whether to always log messages to a log file."
)
log_file_path = _config.ConfigItem(
"",
"The file to log messages to. If empty string is given, "
"it defaults to a file ``'astropy.log'`` in "
"the astropy config directory.",
)
log_file_level = _config.ConfigItem(
"INFO", "Threshold for logging messages to `log_file_path`."
)
log_file_format = _config.ConfigItem(
"%(asctime)r, %(origin)r, %(levelname)r, %(message)r",
"Format for log file entries.",
)
log_file_encoding = _config.ConfigItem(
"",
"The encoding (e.g., UTF-8) to use for the log file. If empty string "
"is given, it defaults to the platform-preferred encoding.",
)
conf = Conf()
def _init_log():
"""Initializes the Astropy log--in most circumstances this is called
automatically when importing astropy.
"""
global log
orig_logger_cls = logging.getLoggerClass()
logging.setLoggerClass(AstropyLogger)
try:
log = logging.getLogger("astropy")
log._set_defaults()
finally:
logging.setLoggerClass(orig_logger_cls)
return log
def _teardown_log():
"""Shut down exception and warning logging (if enabled) and clear all
Astropy loggers from the logging module's cache.
This involves poking some logging module internals, so much of it is 'at
your own risk' and is allowed to pass silently if any exceptions occur.
"""
global log
if log.exception_logging_enabled():
log.disable_exception_logging()
if log.warnings_logging_enabled():
log.disable_warnings_logging()
del log
# Now for the fun stuff...
try:
logging._acquireLock()
try:
loggerDict = logging.Logger.manager.loggerDict
# Iterate over a copy of the keys, since entries are deleted below
for key in list(loggerDict.keys()):
if key == "astropy" or key.startswith("astropy."):
del loggerDict[key]
finally:
logging._releaseLock()
except Exception:
pass
Logger = logging.getLoggerClass()
class AstropyLogger(Logger):
"""
This class is used to set up the Astropy logging.
The main functionality added by this class over the built-in
logging.Logger class is the ability to keep track of the origin of the
messages, the ability to enable logging of warnings.warn calls and
exceptions, and the addition of colorized output and context managers to
easily capture messages to a file or list.
"""
def makeRecord(
self,
name,
level,
pathname,
lineno,
msg,
args,
exc_info,
func=None,
extra=None,
sinfo=None,
):
if extra is None:
extra = {}
if "origin" not in extra:
current_module = find_current_module(1, finddiff=[True, "logging"])
if current_module is not None:
extra["origin"] = current_module.__name__
else:
extra["origin"] = "unknown"
return Logger.makeRecord(
self,
name,
level,
pathname,
lineno,
msg,
args,
exc_info,
func=func,
extra=extra,
sinfo=sinfo,
)
_showwarning_orig = None
def _showwarning(self, *args, **kwargs):
# Bail out if we are not catching a warning from Astropy
if not isinstance(args[0], AstropyWarning):
return self._showwarning_orig(*args, **kwargs)
warning = args[0]
# Deliberately not using isinstance here: We want to display
# the class name only when it's not the default class,
# AstropyWarning. The name of subclasses of AstropyWarning should
# be displayed.
if type(warning) not in (AstropyWarning, AstropyUserWarning):
message = f"{warning.__class__.__name__}: {args[0]}"
else:
message = str(args[0])
mod_path = args[2]
# Now that we have the module's path, we look through sys.modules to
# find the module object and thus the fully-package-specified module
# name. The module.__file__ is the original source file name.
mod_name = None
mod_path, ext = os.path.splitext(mod_path)
for name, mod in list(sys.modules.items()):
try:
# Believe it or not this can fail in some cases:
# https://github.com/astropy/astropy/issues/2671
path = os.path.splitext(getattr(mod, "__file__", ""))[0]
except Exception:
continue
if path == mod_path:
mod_name = mod.__name__
break
if mod_name is not None:
self.warning(message, extra={"origin": mod_name})
else:
self.warning(message)
def warnings_logging_enabled(self):
return self._showwarning_orig is not None
def enable_warnings_logging(self):
"""
Enable logging of warnings.warn() calls.
Once called, any subsequent calls to ``warnings.warn()`` are
redirected to this logger and emitted with level ``WARN``. Note that
this replaces the output from ``warnings.warn``.
This can be disabled with ``disable_warnings_logging``.
"""
if self.warnings_logging_enabled():
raise LoggingError("Warnings logging has already been enabled")
self._showwarning_orig = warnings.showwarning
warnings.showwarning = self._showwarning
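# Illustrative usage sketch (not executed here; assumes warnings logging is
# enabled, as it is by default via ``conf.log_warnings``): AstropyWarning
# subclasses raised through warnings.warn are rerouted to this logger, so
# they can be captured like any other log record.
#
#     >>> import warnings
#     >>> from astropy import log
#     >>> from astropy.utils.exceptions import AstropyUserWarning
#     >>> with log.log_to_list() as entries:
#     ...     warnings.warn("something odd", AstropyUserWarning)
#     >>> entries[0].levelname
#     'WARNING'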
def disable_warnings_logging(self):
"""
Disable logging of warnings.warn() calls.
Once called, any subsequent calls to ``warnings.warn()`` are no longer
redirected to this logger.
This can be re-enabled with ``enable_warnings_logging``.
"""
if not self.warnings_logging_enabled():
raise LoggingError("Warnings logging has not been enabled")
if warnings.showwarning != self._showwarning:
raise LoggingError(
"Cannot disable warnings logging: "
"warnings.showwarning was not set by this "
"logger, or has been overridden"
)
warnings.showwarning = self._showwarning_orig
self._showwarning_orig = None
_excepthook_orig = None
def _excepthook(self, etype, value, traceback):
if traceback is None:
mod = None
else:
tb = traceback
while tb.tb_next is not None:
tb = tb.tb_next
mod = inspect.getmodule(tb)
# Include the error type in the message.
if len(value.args) > 0:
message = f"{etype.__name__}: {str(value)}"
else:
message = str(etype.__name__)
if mod is not None:
self.error(message, extra={"origin": mod.__name__})
else:
self.error(message)
self._excepthook_orig(etype, value, traceback)
def exception_logging_enabled(self):
"""
Determine if the exception-logging mechanism is enabled.
Returns
-------
exclog : bool
True if exception logging is on, False if not.
"""
try:
ip = get_ipython()
except NameError:
ip = None
if ip is None:
return self._excepthook_orig is not None
else:
return _AstLogIPYExc in ip.custom_exceptions
def enable_exception_logging(self):
"""
Enable logging of exceptions.
Once called, any uncaught exceptions will be emitted with level
``ERROR`` by this logger, before being raised.
This can be disabled with ``disable_exception_logging``.
"""
try:
ip = get_ipython()
except NameError:
ip = None
if self.exception_logging_enabled():
raise LoggingError("Exception logging has already been enabled")
if ip is None:
# standard python interpreter
self._excepthook_orig = sys.excepthook
sys.excepthook = self._excepthook
else:
# IPython has its own way of dealing with excepthook
# We need to locally define the function here, because IPython
# actually makes this a member function of their own class
def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None):
# First use our excepthook
self._excepthook(etype, evalue, tb)
# Now also do IPython's traceback
ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
# now register the function with IPython
# note that we include _AstLogIPYExc so `disable_exception_logging`
# knows that it's disabling the right thing
ip.set_custom_exc((BaseException, _AstLogIPYExc), ipy_exc_handler)
# and set self._excepthook_orig to a no-op
self._excepthook_orig = lambda etype, evalue, tb: None
def disable_exception_logging(self):
"""
Disable logging of exceptions.
Once called, any uncaught exceptions will no longer be emitted by this
logger.
This can be re-enabled with ``enable_exception_logging``.
"""
try:
ip = get_ipython()
except NameError:
ip = None
if not self.exception_logging_enabled():
raise LoggingError("Exception logging has not been enabled")
if ip is None:
# standard python interpreter
if sys.excepthook != self._excepthook:
raise LoggingError(
"Cannot disable exception logging: "
"sys.excepthook was not set by this logger, "
"or has been overridden"
)
sys.excepthook = self._excepthook_orig
self._excepthook_orig = None
else:
# IPython has its own way of dealing with exceptions
ip.set_custom_exc(tuple(), None)
def enable_color(self):
"""
Enable colorized output.
"""
_conf.use_color = True
def disable_color(self):
"""
Disable colorized output.
"""
_conf.use_color = False
@contextmanager
def log_to_file(self, filename, filter_level=None, filter_origin=None):
"""
Context manager to temporarily log messages to a file.
Parameters
----------
filename : str
The file to log messages to.
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the file. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the file.
Notes
-----
By default, the logger already outputs log messages to a file set in
the Astropy configuration file. Using this context manager does not
stop log messages from being output to that file, nor does it stop log
messages from being printed to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_file('myfile.log'):
# your code here
"""
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(filename, encoding=encoding)
if filter_level is not None:
fh.setLevel(filter_level)
if filter_origin is not None:
fh.addFilter(FilterOrigin(filter_origin))
f = logging.Formatter(conf.log_file_format)
fh.setFormatter(f)
self.addHandler(fh)
yield
fh.close()
self.removeHandler(fh)
@contextmanager
def log_to_list(self, filter_level=None, filter_origin=None):
"""
Context manager to temporarily log messages to a list.
Parameters
----------
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the list. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the list.
Notes
-----
Using this context manager does not stop log messages from being
output to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_list() as log_list:
# your code here
"""
lh = ListHandler()
if filter_level is not None:
lh.setLevel(filter_level)
if filter_origin is not None:
lh.addFilter(FilterOrigin(filter_origin))
self.addHandler(lh)
yield lh.log_list
self.removeHandler(lh)
def _set_defaults(self):
"""
Reset logger to its initial state.
"""
# Reset any previously installed hooks
if self.warnings_logging_enabled():
self.disable_warnings_logging()
if self.exception_logging_enabled():
self.disable_exception_logging()
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(conf.log_level)
# Set up the stdout handler
sh = StreamHandler()
self.addHandler(sh)
# Set up the main log file handler if requested (but this might fail if
# configuration directory or log file is not writeable).
if conf.log_to_file:
log_file_path = conf.log_file_path
# "None" as a string because it comes from config
try:
_ASTROPY_TEST_ # noqa: B018
testing_mode = True
except NameError:
testing_mode = False
try:
if log_file_path == "" or testing_mode:
log_file_path = os.path.join(
_config.get_config_dir("astropy"), "astropy.log"
)
else:
log_file_path = os.path.expanduser(log_file_path)
encoding = conf.log_file_encoding if conf.log_file_encoding else None
fh = logging.FileHandler(log_file_path, encoding=encoding)
except OSError as e:
warnings.warn(
f"log file {log_file_path!r} could not be opened for writing:"
f" {str(e)}",
RuntimeWarning,
)
else:
formatter = logging.Formatter(conf.log_file_format)
fh.setFormatter(formatter)
fh.setLevel(conf.log_file_level)
self.addHandler(fh)
if conf.log_warnings:
self.enable_warnings_logging()
if conf.log_exceptions:
self.enable_exception_logging()
class StreamHandler(logging.StreamHandler):
"""
A specialized StreamHandler that logs INFO and DEBUG messages to
stdout, and all other messages to stderr. Also provides coloring
of the output, if enabled in the parent logger.
"""
def emit(self, record):
"""
Emit a record, sending INFO and DEBUG messages to stdout and all others to stderr.
"""
if record.levelno <= logging.INFO:
stream = sys.stdout
else:
stream = sys.stderr
if record.levelno < logging.DEBUG or not _conf.use_color:
print(record.levelname, end="", file=stream)
else:
# Import utils.console only if necessary and at the latest because
# the import takes a significant time [#4649]
from .utils.console import color_print
if record.levelno < logging.INFO:
color_print(record.levelname, "magenta", end="", file=stream)
elif record.levelno < logging.WARN:
color_print(record.levelname, "green", end="", file=stream)
elif record.levelno < logging.ERROR:
color_print(record.levelname, "brown", end="", file=stream)
else:
color_print(record.levelname, "red", end="", file=stream)
record.message = f"{record.msg} [{record.origin:s}]"
print(": " + record.message, file=stream)
class FilterOrigin:
"""A filter for the record origin."""
def __init__(self, origin):
self.origin = origin
def filter(self, record):
return record.origin.startswith(self.origin)
class ListHandler(logging.Handler):
"""A handler that can be used to capture the records in a list."""
def __init__(self, filter_level=None, filter_origin=None):
logging.Handler.__init__(self)
self.log_list = []
def emit(self, record):
self.log_list.append(record)
|
93c9a11843bfb33fa31e5b247121d6e092fa28c4f90fa2db978a297b33e11d25 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import configparser
import doctest
import os
import sys
from datetime import datetime
from importlib import metadata
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires("astropy"):
if 'extra == "docs"' in line:
req = Requirement(line.split(";")[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
try:
version = metadata.version(req_package)
except metadata.PackageNotFoundError:
missing_requirements[req_package] = req_specifier
if version not in SpecifierSet(req_specifier, prereleases=True):
missing_requirements[req_package] = req_specifier
if missing_requirements:
print(
"The following packages could not be found and are required to "
"build the documentation:"
)
for key, val in missing_requirements.items():
print(f" * {key} {val}")
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa: E402
from sphinx_astropy.conf.v1 import ( # noqa: E402
exclude_patterns,
extensions,
intersphinx_mapping,
numpydoc_xref_aliases,
numpydoc_xref_astropy_aliases,
numpydoc_xref_ignore,
rst_epilog,
)
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {
"axes.labelsize": "large",
"figure.figsize": (6, 6),
"figure.subplot.hspace": 0.5,
"savefig.bbox": "tight",
"savefig.facecolor": "none",
}
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ["png", "svg", "pdf"]
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.0"
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping["astropy"]
# add any custom intersphinx for astropy
intersphinx_mapping.update(
{
"astropy-dev": ("https://docs.astropy.org/en/latest/", None),
"pyerfa": ("https://pyerfa.readthedocs.io/en/stable/", None),
"pytest": ("https://docs.pytest.org/en/stable/", None),
"ipython": ("https://ipython.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"sphinx_automodapi": (
"https://sphinx-automodapi.readthedocs.io/en/stable/",
None,
),
"asdf-astropy": ("https://asdf-astropy.readthedocs.io/en/latest/", None),
"fsspec": ("https://filesystem-spec.readthedocs.io/en/latest/", None),
}
)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# .inc.rst mean *include* files, don't have sphinx process them
exclude_patterns += ["_templates", "changes", "_pkgtemplate.rst", "**/*.inc.rst"]
# Add any paths that contain templates here, relative to this directory.
if "templates_path" not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append("_templates")
extensions += ["sphinx_changelog"]
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg"))
__minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "")
min_versions = {}
for line in metadata.requires("astropy"):
req = Requirement(line.split(";")[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt") as cl:
rst_epilog += cl.read().format(
minimum_python=__minimum_python_version__, **min_versions
)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT")
REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA")
FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP")
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update(
{
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments",
"Path",
# TODO! not need to ignore.
"flag",
"bits",
}
)
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update(
{
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`",
}
)
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
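# For example (illustrative only): a docstring parameter documented as
#
#     filename : path-like
#         Name of the file to read.
#
# will have "path-like" rendered as a cross-reference to the Python glossary
# entry for "path-like object", via the alias defined above.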
# Turn off table of contents entries for functions and classes
toc_object_entries = False
# -- Project information ------------------------------------------------------
project = "Astropy"
author = "The Astropy Developers"
copyright = f"2011–{datetime.utcnow().year}, " + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# Only include dev docs in dev version.
dev = "dev" in release
if not dev:
exclude_patterns += ["development/*", "testhelpers.rst"]
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ["astropy."]
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"{project} v{release}"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# A dictionary of values to pass into the template engine's context for all pages.
html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ["robots.txt"]
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", project + ".tex", project + " Documentation", author, "manual")
]
latex_logo = "_static/astropy_logo.pdf"
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project.lower(), project + " Documentation", [author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = "https://github.com/astropy/astropy/issues/"
edit_on_github_branch = "main"
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# See docs/nitpick-exceptions file for the actual listing.
nitpick_ignore = []
for line in open("nitpick-exceptions"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
nitpick_ignore.append((dtype, target.strip()))
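# For example (illustrative entry only), a line in that file such as
#
#     py:class some.module.SomeClass
#
# is split into the ("py:class", "some.module.SomeClass") tuple appended above.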
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
"backreferences_dir": "generated/modules", # path to store the module using example template
"filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_"
"examples_dirs": f"..{os.sep}examples", # path to the examples scripts
"gallery_dirs": "generated/examples", # path to save gallery generated examples
"reference_url": {
"astropy": None,
"matplotlib": "https://matplotlib.org/stable/",
"numpy": "https://numpy.org/doc/stable/",
},
"abort_on_example_error": True,
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = [
"https://journals.aas.org/manuscript-preparation/",
"https://maia.usno.navy.mil/",
"https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer",
"https://aa.usno.navy.mil/publications/docs/Circular_179.php",
"http://data.astropy.org",
"https://doi.org/", # CI blocked by service provider
"https://ui.adsabs.harvard.edu", # CI blocked by service provider
"https://www.tandfonline.com/", # 403 Client Error: Forbidden
"https://physics.nist.gov/", # SSL: CERTIFICATE_VERIFY_FAILED
"https://ieeexplore.ieee.org/", # 418 Client Error: I'm a teapot
"https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst
r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+",
]
linkcheck_timeout = 180
linkcheck_anchors = False
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs."""
# Make sure we're outputting HTML
if app.builder.format != "html":
return
files_to_render = ["index", "install", "development/index"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context
)
source[0] = rendered
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
affiliate packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get("reftarget") # str or None
if str(reftarget).startswith("astropy:"):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, "astropy:"
elif dev and str(reftarget).startswith("astropy-dev:"):
process, replace = True, "astropy-dev:"
else:
process, replace = False, ""
# make link local
if process:
reftype = node.get("reftype")
refdoc = node.get("refdoc", app.env.docname)
# convert astropy intersphinx targets to local links.
# there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, "")
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node["refdomain"]]
return domain.resolve_xref(
app.env, refdoc, app.builder, reftype, reftarget, node, contnode
)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
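# For example (with a hypothetical label name): a reference written as
# ``:ref:`astropy-dev:testing-guidelines``` resolves to the local label
# ``testing-guidelines`` when building the development docs, and is left to the
# ``astropy-dev`` intersphinx mapping when building the stable docs.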
def setup(app):
if sphinx_gallery is None:
msg = (
"The sphinx_gallery extension is not installed, so the "
"gallery will not be built. You will probably see "
"additional warnings about undefined references due "
"to this."
)
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
|
5c9042fdc14da552a85582c43388385474e1c69c247a85c37ac07440ee202bed | """
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Set up numpy and matplotlib, and use a nicer set of plot parameters:
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <https://simbad.unistra.fr/simbad/>`_ database:
c1 = coord.SkyCoord(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s,
frame='icrs')
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
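##############################################################################
# As a quick consistency check (a minimal sketch, not part of the original
# example), we can transform back to ICRS and confirm that we recover the
# input position and radial velocity of HD 39881:
icrs_back = gc2.transform_to(coord.ICRS())
print(icrs_back.ra.degree, icrs_back.dec.degree, icrs_back.radial_velocity)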
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.SkyCoord(ring_rep, frame=coord.Galactocentric)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel(f"$v_x$ [{(u.km / u.s).to_string('latex_inline')}]")
axes[1].set_ylabel(f"$v_y$ [{(u.km / u.s).to_string('latex_inline')}]")
fig.tight_layout()
plt.show()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(fr'$\mu_l \, \cos b$ [{(u.mas/u.yr).to_string("latex_inline")}]')
ax.legend()
plt.show()
|
1913097fc76662574dac525d431f769da2249397c66f51927c119feed8febbc2 | """
================================================================
Convert a radial velocity to the Galactic Standard of Rest (GSR)
================================================================
Radial or line-of-sight velocities of sources are often reported in a
Heliocentric or Solar-system barycentric reference frame. A common
transformation incorporates the projection of the Sun's motion along the
line-of-sight to the target, hence transforming it to a Galactic rest frame
instead (sometimes referred to as the Galactic Standard of Rest, GSR). This
transformation depends on the assumptions about the orientation of the Galactic
frame relative to the bary- or Heliocentric frame. It also depends on the
assumed solar velocity vector. Here we'll demonstrate how to perform this
transformation using a sky position and barycentric radial-velocity.
*By: Adrian Price-Whelan*
*License: BSD*
"""
################################################################################
# Import the required Astropy packages:
import astropy.coordinates as coord
import astropy.units as u
################################################################################
# Use the latest convention for the Galactocentric coordinates
coord.galactocentric_frame_defaults.set('latest')
################################################################################
# For this example, let's work with the coordinates and barycentric radial
# velocity of the star HD 155967, as obtained from
# `Simbad <https://simbad.unistra.fr/simbad/>`_:
icrs = coord.SkyCoord(ra=258.58356362*u.deg, dec=14.55255619*u.deg,
radial_velocity=-16.1*u.km/u.s, frame='icrs')
################################################################################
# We next need to decide on the velocity of the Sun in the assumed GSR frame.
# We'll use the same velocity vector as used in the
# `~astropy.coordinates.Galactocentric` frame, and convert it to a
# `~astropy.coordinates.CartesianRepresentation` object using the
# ``.to_cartesian()`` method of the
# `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
################################################################################
# We now need to get a unit vector in the assumed Galactic frame from the sky
# position in the ICRS frame above. We'll use this unit vector to project the
# solar velocity onto the line-of-sight:
gal = icrs.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
################################################################################
# Now we project the solar velocity using this unit vector:
v_proj = v_sun.dot(unit_vector)
################################################################################
# Finally, we add the projection of the solar velocity to the radial velocity
# to get a GSR radial velocity:
rv_gsr = icrs.radial_velocity + v_proj
print(rv_gsr)
################################################################################
# We could wrap this in a function so we can control the solar velocity and
# re-use the above code:
def rv_to_gsr(c, v_sun=None):
"""Transform a barycentric radial velocity to the Galactic Standard of Rest
(GSR).
    The input radial velocity must be passed in as part of the input
    coordinate ``c`` (e.g., via the ``radial_velocity`` keyword of
    `~astropy.coordinates.SkyCoord`).
Parameters
----------
c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
        The radial velocity, associated with a sky coordinate, to be
transformed.
v_sun : `~astropy.units.Quantity`, optional
The 3D velocity of the solar system barycenter in the GSR frame.
Defaults to the same solar motion as in the
`~astropy.coordinates.Galactocentric` frame.
Returns
-------
v_gsr : `~astropy.units.Quantity`
The input radial velocity transformed to a GSR frame.
"""
if v_sun is None:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
gal = c.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
v_proj = v_sun.dot(unit_vector)
return c.radial_velocity + v_proj
rv_gsr = rv_to_gsr(icrs)
print(rv_gsr)
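################################################################################
# As a minimal sketch (not part of the original example), the same function can
# be used with a custom solar velocity; the values below are an assumed
# (U, V, W) = (11.1, 244, 7.25) km/s, purely for illustration:
v_sun_custom = coord.CartesianRepresentation([11.1, 244, 7.25] * u.km / u.s)
print(rv_to_gsr(icrs, v_sun=v_sun_custom))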
|
12d8a8caa71a6c3cfb0ef15847ba72f29ee0da4698cbb661d8d6610d94d84966 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
from . import _stats
__all__ = [
"gaussian_fwhm_to_sigma",
"gaussian_sigma_to_fwhm",
"binom_conf_interval",
"binned_binom_proportion",
"poisson_conf_interval",
"median_absolute_deviation",
"mad_std",
"signal_to_noise_oir_ccd",
"bootstrap",
"kuiper",
"kuiper_two",
"kuiper_false_positive_probability",
"cdf_from_intervals",
"interval_overlap_length",
"histogram_intervals",
"fold_intervals",
]
__doctest_skip__ = ["binned_binom_proportion"]
__doctest_requires__ = {
"binom_conf_interval": ["scipy"],
"poisson_conf_interval": ["scipy"],
}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n)\right)^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
cases. Particularly, when n is very large, and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
    latter is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
       Probability in Estimation Problems". Proc. R. Soc. Lond. A 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
"""
if confidence_level < 0.0 or confidence_level > 1.0:
raise ValueError("confidence_level must be between 0. and 1.")
alpha = 1.0 - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError("n must be positive")
if (k < 0).any() or (k > n).any():
raise ValueError("k must be in {0, 1, .., n}")
if interval == "wilson" or interval == "wald":
from scipy.special import erfinv
kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == "wilson":
midpoint = (k + kappa**2 / 2.0) / (n + kappa**2)
halflength = (
(kappa * np.sqrt(n))
/ (n + kappa**2)
* np.sqrt(p * (1 - p) + kappa**2 / (4 * n))
)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.0] = 0.0
conf_interval[conf_interval > 1.0] = 1.0
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1.0 - p) / n)
conf_interval = np.array([midpoint - halflength, midpoint + halflength])
elif interval == "jeffreys" or interval == "flat":
from scipy.special import betaincinv
if interval == "jeffreys":
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.0
elif k == n:
upperbound = 1.0
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f"Unrecognized interval: {interval:s}")
return conf_interval
def binned_binom_proportion(
x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson"
):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
        ``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError("sizes of x and success must match")
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(
k, n, confidence_level=confidence_level, interval=interval
)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(
n, interval="root-n", sigma=1, background=0, confidence_level=None
):
r"""Poisson parameter confidence interval given observed counts.
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.cfa.harvard.edu/sherpa/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <https://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
This code is very intense numerically, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == "root-n":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
elif interval == "root-n-0":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == "pearson":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array(
[n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)]
)
elif interval == "sherpagehrels":
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)])
elif interval == "frequentist-confidence":
_check_poisson_conf_inputs(1.0, background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array(
[
0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha),
]
)
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == "kraft-burrows-nousek":
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError("Number of counts must be integer.")
elif not issubclass(n.dtype.type, np.integer):
raise TypeError("Number of counts must be integer.")
if confidence_level is None:
raise ValueError(
f"Set confidence_level for method {interval}. (sigma is ignored.)"
)
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError("confidence_level must be a number between 0 and 1.")
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError("Background must be >= 0.")
conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)(
n, background, confidence_level
)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
        # returns a masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# broadcast the median array before subtraction
if axis is not None:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
"""
signal = t * source_eps * gain
noise = np.sqrt(
t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2
)
return signal / noise
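# Example usage (a minimal sketch with made-up instrument values, not part of
# the module API): a source giving 100 e-/s in a 20-pixel aperture, with
# 10 e-/s/pixel of sky, 0.01 e-/s/pixel of dark current and a read noise of
# 5 e-, integrated for 300 s, gives a signal-to-noise ratio of roughly 100:
#
#     >>> signal_to_noise_oir_ccd(300, 100, 10, 0.01, 5, 20)  # ~99.7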
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <https://mpmath.org/>`_ library.
"""
from math import exp
from scipy.integrate import quad
from scipy.optimize import brentq
from scipy.special import factorial
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
    # result once and reuse it. The same is true for the factorial of N.
    # eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
"""
        Kraft, Burrows and Nousek suggest integrating from N-B in both
        directions at once, so that S_min and S_max move similarly (see
        the article for details). Here, this is implemented differently:
        treat S_max as the optimization parameter in func and then
        calculate the matching S_min such that eqn7(S_max) = eqn7(S_min).
"""
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.0
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <https://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
"""
from mpmath import exp, factorial, findroot, fsum, mpf, power, quad
# We convert these values to float. Because for some reason,
# mpmath.mpf cannot convert from numpy.int64
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1.0 / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
"""
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_max) = eqn7(S_min).
"""
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" values is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.0
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found that prevent the solvers from finding
# the roots unless the starting values are very close to the final values.
# Thus, this primitive, time-wasting, brute-force stepping here to get
# an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.0)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
"""Upper limit on a poisson count rate.
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: either :mod:`scipy` or `mpmath
<https://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100).
"""
from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError("Need mpmath package for input numbers this large.")
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError("Either scipy or mpmath are required.")
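# Illustrative usage sketch (not part of the original module): the dispatch
# above returns the (S_min, S_max) confidence interval on the source counts,
# preferring scipy and falling back to mpmath for large N. The input values
# below are arbitrary examples; scipy or mpmath must be installed to run it.
def _example_kraft_burrows_nousek():
    s_min, s_max = _kraft_burrows_nousek(N=7, B=2.5, CL=0.90)
    # The interval is bounded below by zero and non-degenerate.
    assert 0.0 <= s_min < s_max
    return s_min, s_max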
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.420, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import comb, factorial
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import comb, factorial
if D < 0.0 or D > 2.0:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2.0 / N:
return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1)
elif D < 3.0 / N:
k = -(N * D - 1.0) / 2.0
r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0)
a, b = -k + r, -k - r
return 1 - (
factorial(N - 1)
* (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b))
/ N ** (N - 2)
/ (b - a)
)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y ** (t - 3) * (
y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2
)
term1 = comb(N, t)
term2 = (1 - D - t / N) ** (N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.0
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (
ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)
).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax(
(np.arange(N) + 1) / float(N) - cdfv
)
return D, kuiper_false_positive_probability(D, N)
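# Illustrative usage sketch (not part of the original module): draws that are
# uniform on [0, 1) are tested against the default uniform CDF, so the
# returned false positive probability is typically large; strongly
# non-uniform data would instead give a very small fpp.
def _example_kuiper():
    rng = np.random.default_rng(0)
    D, fpp = kuiper(rng.uniform(size=500))
    # The Kuiper statistic is bounded by definition.
    assert 0.0 <= D <= 2.0
    return D, fpp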
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
(n1,) = data1.shape
(n2,) = data2.shape
common_type = np.result_type(data1.dtype, data2.dtype)
if not (
np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)
):
raise ValueError("kuiper_two only accepts real inputs")
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError("kuiper_two only accepts non-nan inputs")
D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
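# Illustrative usage sketch (not part of the original module): comparing two
# samples drawn from the same distribution. As noted above, the returned fpp
# is only approximate, especially for small samples.
def _example_kuiper_two():
    rng = np.random.default_rng(0)
    D, fpp = kuiper_two(rng.normal(size=300), rng.normal(size=300))
    return D, fpp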
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
For the ith element, each input interval contributes its weight times the
number of times it covers the subinterval (breaks[i], breaks[i+1]); the
result is the sum of those contributions.
"""
r = []
breaks = set()
tot = 0
for a, b, wt in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.0)
breaks.add(1.0)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for a, b, wt in r:
totals[breaks_map[a] : breaks_map[b]] += wt
return np.array(breaks), totals
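# Worked example (illustrative, not part of the original module): a single
# observation block covering phases 0.8..1.3 wraps past phase 1, so it folds
# to weight 1 on roughly (0, 0.3) and (0.8, 1) and weight 0 in between.
def _example_fold_intervals():
    breaks, weights = fold_intervals([(0.8, 1.3, 1.0)])
    # breaks is approximately [0, 0.3, 0.8, 1]; weights approximately [1, 0, 1].
    return breaks, weights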
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by ``breaks`` and ``totals``.
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError("Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
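# Illustrative sketch (not part of the original module) of the intended
# chaining: fold the exposure intervals, build a piecewise-linear CDF from
# the result, and feed that CDF to the Kuiper test so that uneven phase
# coverage is taken into account. The interval values are arbitrary examples.
def _example_exposure_weighted_kuiper():
    breaks, weights = fold_intervals([(0.0, 0.5, 1.0), (0.25, 0.75, 1.0)])
    cdf = cdf_from_intervals(breaks, weights)
    rng = np.random.default_rng(1)
    return kuiper(rng.uniform(size=200), cdf)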
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.0
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
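# Worked example (illustrative, not part of the original module): the
# intervals (0.0, 0.5) and (0.3, 0.8) overlap on (0.3, 0.5), so the overlap
# length is 0.2.
def _example_interval_overlap_length():
    assert abs(interval_overlap_length((0.0, 0.5), (0.3, 0.8)) - 0.2) < 1e-12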
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end))
h[j] += ol / (1.0 / n) * totals[i]
start = end
return h
|
514d302adf93d056a62e753a15ba44cb658c192f517800d1840c3a6181999e5c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms.
Ported from the astroML project: https://www.astroml.org/
"""
import numpy as np
from .bayesian_blocks import bayesian_blocks
__all__ = [
"histogram",
"scott_bin_width",
"freedman_bin_width",
"knuth_bin_width",
"calculate_bin_edges",
]
def calculate_bin_edges(a, bins=10, range=None, weights=None):
"""
Calculate histogram bin edges like ``numpy.histogram_bin_edges``.
Parameters
----------
a : array-like
Input data. The bin edges are calculated over the flattened array.
bins : int, list, or str, optional
If ``bins`` is an int, it is the number of bins. If it is a list
it is taken to be the bin edges. If it is a string, it must be one
of 'blocks', 'knuth', 'scott' or 'freedman'. See
`~astropy.stats.histogram` for a description of each method.
range : tuple or None, optional
The minimum and maximum range for the histogram. If not specified,
it will be (a.min(), a.max()). However, if bins is a list it is
returned unmodified regardless of the range argument.
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges, though they may be used in the future as new methods are added.
"""
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError(
"weights are not yet supported for the enhanced histogram"
)
if bins == "blocks":
bins = bayesian_blocks(a)
elif bins == "knuth":
da, bins = knuth_bin_width(a, True)
elif bins == "scott":
da, bins = scott_bin_width(a, True)
elif bins == "freedman":
da, bins = freedman_bin_width(a, True)
else:
raise ValueError(f"unrecognized bin code: '{bins}'")
if range:
# Check that the upper and lower edges are what was requested.
# The current implementation of the bin width estimators does not
# guarantee this, it only ensures that data outside the range is
# excluded from calculation of the bin widths.
if bins[0] != range[0]:
bins[0] = range[0]
if bins[-1] != range[1]:
bins[-1] = range[1]
elif np.ndim(bins) == 0:
# Number of bins was given
bins = np.histogram_bin_edges(a, bins, range=range, weights=weights)
return bins
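# Illustrative usage sketch (not part of the original module): a string bin
# specification routes to the corresponding width estimator above, while an
# integer falls through to numpy.histogram_bin_edges.
def _example_calculate_bin_edges():
    rng = np.random.default_rng(0)
    data = rng.normal(size=1000)
    edges_freedman = calculate_bin_edges(data, bins="freedman")
    edges_fixed = calculate_bin_edges(data, bins=10)
    return edges_freedman, edges_fixed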
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as ``numpy.histogram()``.
Parameters
----------
a : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
other keyword arguments are described in numpy.histogram().
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights)
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
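# Illustrative usage sketch (not part of the original module): any extra
# keyword arguments (here ``density``) are forwarded to numpy.histogram once
# the bin edges have been determined.
def _example_histogram():
    rng = np.random.default_rng(0)
    data = rng.normal(size=1000)
    hist, edges = histogram(data, bins="scott", density=True)
    return hist, edges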
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule.
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
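# Worked check of the formula (illustrative, not part of the original
# module): 1000 points split evenly between -1 and +1 have standard
# deviation exactly 1 and n**(1/3) == 10, so the Scott width is 3.5 / 10 = 0.35.
def _example_scott_bin_width():
    data = np.repeat([-1.0, 1.0], 500)
    assert abs(scott_bin_width(data) - 0.35) < 1e-6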
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule.
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
where :math:`q_{N}` is the :math:`N`-th percentile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
try:
bins = dmin + dx * np.arange(Nbins + 1)
except ValueError as e:
if "Maximum allowed size exceeded" in str(e):
raise ValueError(
"The inter-quartile range of the data is too small: "
f"failed to construct histogram with {Nbins + 1} bins. "
"Please use another bin method, such as "
'bins="scott"'
)
else: # Something else # pragma: no cover
raise
return dx, bins
else:
return dx
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool, optional
if True, then return the bin edges
quiet : bool, optional
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
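# Illustrative usage sketch (not part of the original module; requires
# scipy): the Freedman-Diaconis edges seed the optimizer, which then picks
# the number of bins that maximizes the Knuth posterior.
def _example_knuth_bin_width():
    rng = np.random.default_rng(0)
    data = rng.normal(size=1000)
    dx, edges = knuth_bin_width(data, return_bins=True)
    return dx, edges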
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width.
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given M number of bins."""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function.
Parameters
----------
M : int
Number of bins
Returns
-------
F : float
evaluation of the negative Knuth loglikelihood function:
smaller values indicate a better fit.
"""
if not np.isscalar(M):
M = M[0]
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(
self.n * np.log(M)
+ self.gammaln(0.5 * M)
- M * self.gammaln(0.5)
- self.gammaln(self.n + 0.5 * M)
+ np.sum(self.gammaln(nk + 0.5))
)
|
d973e93773f276e307f5b8398ca9b876ee3f52868147f0badb4295b56da5c0ba | """
Table property for providing information about table.
"""
import os
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
from contextlib import contextmanager
import numpy as np
from astropy.utils.data_info import DataInfo
__all__ = ["table_info", "TableInfo", "serialize_method_as"]
def table_info(tbl, option="attributes", out=""):
"""
Write summary information about the table columns to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1 2
Parameters
----------
option : str, callable, list of (str or callable)
Info option, defaults to 'attributes'.
out : file-like, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == "":
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append("masked=True")
descr_vals.append(f"length={len(tbl)}")
outlines = ["<" + " ".join(descr_vals) + ">"]
cols = list(tbl.columns.values())
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing then remove uninteresting
# columns.
if "class" in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = {type(col) for col in cols}
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info["class"]
if "n_bad" in info.colnames and np.all(info["n_bad"] == 0):
del info["n_bad"]
# Standard attributes has 'length' but this is typically redundant
if "length" in info.colnames and np.all(info["length"] == len(tbl)):
del info["length"]
for name in info.colnames:
if info[name].dtype.kind in "SU" and np.all(info[name] == ""):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append("<No columns>")
out.writelines(outline + os.linesep for outline in outlines)
class TableInfo(DataInfo):
def __call__(self, option="attributes", out=""):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
``serialize_method`` is a str or dict. If a str, then that value is the
``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
It could have been a private method on Table, but we prefer not to add
clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isinstance(key, type) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
# Original serialize_method dict, keyed by column name. This only
# gets used and set if there is an override.
original_sms = {}
if serialize_method:
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, "serialize_method"):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {
fmt: override_sm for fmt in col.info.serialize_method
}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
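# Illustrative usage sketch (not part of the original module): temporarily
# force one serialization method for all columns while writing. The string
# 'formatted_value' is just an example of a valid serialize_method value.
def _example_serialize_method_as():
    from astropy.table import Table

    t = Table({"a": [1, 2]})
    with serialize_method_as(t, "formatted_value"):
        # Inside the block, every column that carries a serialize_method
        # mapping uses 'formatted_value' for every output format; the
        # original settings are restored on exit.
        pass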
|
1aede49d4d5a16ba646c56102ee1ef4ee1cf37c26363159c015051c172c0cb30 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Indexing for Table columns.
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MaxValue, MinValue
from .sorted_array import SortedArray
class QueryError(ValueError):
"""
Indicates that a given index cannot handle the supplied query.
"""
pass
class Index:
"""
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
"""
def __init__(self, columns, engine=None, unique=False):
# Local imports to avoid import problems.
from astropy.time import Time
from .table import Column, Table
if columns is not None:
columns = list(columns)
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort(kind="stable"))
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(
col.jd, format="jd", scale=col.scale
)
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort(kind="stable")]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
"""
Number of rows in index.
"""
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
"""
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
"""
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
"""
Recreate the index based on data in self.columns.
"""
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
"""
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
"""
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError(f"Column does not belong to index: {col_name}")
def insert_row(self, pos, vals, columns):
"""
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
"""
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[self.col_position(col.info.name)] = vals[i]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
"""
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
"""
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError(
"Expected int, array of ints, or slice but got {} in remove_rows".format(
row_specifier
)
)
def remove_rows(self, row_specifier):
"""
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
"""
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
"""
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
"""
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple(col[row] for col in self.columns), row):
raise ValueError(f"Could not remove row {row} from index")
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
"""
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
"""
return self.data.find(key)
def same_prefix(self, key):
"""
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
"""
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
"""
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
"""
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
# (x, y) search corresponds to ((x, max), (x, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
"""
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
"""
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
"""
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
"""
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
"""
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
"""
row_map = {row: i for i, row in enumerate(col_slice)}
self.data.replace_rows(row_map)
def sort(self):
"""
Make row numbers follow the same sort order as the keys
of the index.
"""
self.data.sort()
def sorted_data(self):
"""
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
"""
return self.data.sorted_data()
def __getitem__(self, item):
"""
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
"""
return SlicedIndex(self, item)
def __repr__(self):
col_names = tuple(col.info.name for col in self.columns)
return f"<{self.__class__.__name__} columns={col_names} data={self.data}>"
def __deepcopy__(self, memo):
"""
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
"""
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
"""
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : tuple, slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
"""
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
elif isinstance(index_slice, slice): # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
else:
raise TypeError("index_slice must be tuple or slice")
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
"""
The stopping position of the slice, or the end of the
index if this is an original slice.
"""
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
"""
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
"""
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
"""
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
"""
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
"""
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
"""
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def get_index_or_copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.get_index_or_copy().insert_row(self.orig_coords(pos), vals, columns)
def get_row_specifier(self, row_specifier):
return [
self.orig_coords(x) for x in self.index.get_row_specifier(row_specifier)
]
def remove_rows(self, row_specifier):
if not self._frozen:
self.get_index_or_copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.get_index_or_copy().sort()
def __repr__(self):
slice_str = (
"" if self.original else f" slice={self.start}:{self.stop}:{self.step}"
)
return (
f"<{self.__class__.__name__} original={self.original}{slice_str}"
f" index={self.index}>"
)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
"""
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
"""
from .table import Table
if len(self.columns) == 1:
index = Index([col_slice], engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
t = Table(self.columns, copy_indices=False)
with t.index_mode("discard_on_copy"):
new_cols = t[item].columns.values()
index = Index(new_cols, engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
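# Worked example (illustrative, not part of the original module) of the
# coordinate mapping used by SlicedIndex: for a slice starting at original
# row 2 with step 3, sliced rows 0, 1, 2 map to original rows 2, 5, 8, which
# is what orig_coords computes as start + row * step; sliced_coords inverts
# that mapping and drops rows that fall outside the slice.
def _example_sliced_index_coordinates():
    start, step = 2, 3
    assert [start + row * step for row in range(3)] == [2, 5, 8]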
def get_index(table, table_copy=None, names=None):
"""
Input a table and either a subset of its columns as ``table_copy`` or a
list/tuple of column names as ``names``, and return the index corresponding
to that subset of columns, or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError(
'one and only one argument from "table_copy" or "names" is required'
)
if names is None and table_copy is None:
raise ValueError(
'one and only one argument from "table_copy" or "names" is required'
)
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f"{names} is not a subset of table columns")
for name in names:
for index in table[name].info.indices:
if {col.info.name for col in index.columns} == names:
return index
return None
def get_index_by_names(table, names):
"""
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
names : tuple, list
Column names
"""
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
"""
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
"""
_col_subclasses = {}
def __init__(self, table, mode):
"""
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
"""
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ("freeze", "discard_on_copy", "copy_on_getitem"):
raise ValueError(
"Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{}'".format(mode)
)
def __enter__(self):
if self.mode == "discard_on_copy":
self.table._copy_indices = False
elif self.mode == "copy_on_getitem":
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == "discard_on_copy":
self.table._copy_indices = True
elif self.mode == "copy_on_getitem":
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = f"_{cls.__name__}WithIndexCopy"
new_cls = type(str(clsname), (cls,), {"__getitem__": __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
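# Illustrative usage sketch (not part of the original module): this context
# manager is used by ``Table.index_mode``. In 'freeze' mode, index
# maintenance is deferred until the block exits, which helps when making
# many changes to an indexed column.
def _example_index_mode():
    from astropy.table import Table

    t = Table({"a": [3, 1, 2]})
    t.add_index("a")
    with t.index_mode("freeze"):
        t["a"][0] = 0  # the index is not updated here ...
    # ... but is rebuilt from the column values on exit.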
class TableIndices(list):
"""
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
"""
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
"""
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
"""
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError(f"No index found for {item}")
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
Retrieve Table row indices by value or value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError(f"No matches found for key {key}")
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f"No matches found for key {item}")
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists to update the row.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError(f"No matches found for key {key}")
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError(f"Right side should contain {len(rows)} values")
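# Illustrative usage sketch (not part of the original module): retrieval via
# ``.loc`` by indexed column values, mirroring the docstrings above. Both
# endpoints of a value slice are included.
def _example_table_loc():
    from astropy.table import Table

    t = Table({"name": ["b", "a", "c"], "val": [2, 1, 3]})
    t.add_index("name")
    single_row = t.loc["a"]   # one match returns a Row
    several = t.loc["a":"b"]  # value slice returns a Table with rows 'a' and 'b'
    return single_row, several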
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f"No matches found for key {item}")
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
"""
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError(f"Invalid index for iloc: {item}")
return table_slice
|
94457f026de4e31db816d9036f34fa1a7891004ebd3b34494253e9cb359ad8c1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
from collections import OrderedDict
from operator import index as operator_index
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
# Ensure that the row index is a valid index (int)
index = operator_index(index)
n = len(table)
if index < -n or index >= n:
raise IndexError(
f"index {index} out of range for table with length {len(table)}"
)
# Finally, ensure the index is positive [#8422] and set Row attributes
self._index = index % n
self._table = table
def __getitem__(self, item):
try:
# Try the most common use case of accessing a single column in the Row.
# Bypass the TableColumns __getitem__ since that does more testing
# and allows a list of tuple or str, which is not the right thing here.
out = OrderedDict.__getitem__(self._table.columns, item)[self._index]
except (KeyError, TypeError):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
# This is only to raise an exception
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError(
"Unable to compare rows for masked table due to numpy.ma bug"
)
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError(
"Unable to compare rows for masked table due to numpy.ma bug"
)
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError("Datatype coercion is not allowed")
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
def get(self, key, default=None, /):
"""Return the value for key if key is in the columns, else default.
Parameters
----------
key : `str`, positional-only
The name of the column to look for.
default : `object`, optional, positional-only
The value to return if the ``key`` is not among the columns.
Returns
-------
`object`
The value in the ``key`` column of the row if present,
``default`` otherwise.
Examples
--------
>>> from astropy.table import Table
>>> t = Table({"a": [2, 3, 5], "b": [7, 11, 13]})
>>> t[0].get("a")
2
>>> t[1].get("b", 0)
11
>>> t[2].get("c", 0)
0
"""
return self[key] if key in self._table.columns else default
def keys(self):
return self._table.columns.keys()
def values(self):
return self.__iter__()
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
row indexing of a pure numpy structured array or masked array. This
method is slow and its use is discouraged when possible.
Returns
-------
void_row : ``numpy.void`` or ``numpy.ma.mvoid``
Copy of row values.
            ``numpy.void`` if unmasked, ``numpy.ma.mvoid`` otherwise.
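        Examples
        --------
        A minimal sketch, assuming ``t`` is an unmasked Table::
            row_void = t[0].as_void()   # read-only numpy.void copy of the first row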
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
mask = tuple(
col.mask[index] if hasattr(col, "mask") else False for col in cols
)
void_row = np.ma.array([vals], mask=[mask], dtype=self.dtype)[0]
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index : index + 1]
descr_vals = [self.__class__.__name__, f"index={self.index}"]
if table.masked:
descr_vals.append("masked=True")
return table._base_repr_(
html, descr_vals, max_width=-1, tableid=f"table{id(self._table)}"
)
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return "\n".join(self.table[index : index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode("utf-8")
collections.abc.Sequence.register(Row)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import platform
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .index import get_index_by_names
__all__ = ["TableGroups", "ColumnGroups"]
def table_group_by(table, keys):
# index copies are unnecessary and slow down _table_group_by
with table.index_mode("discard_on_copy"):
return _table_group_by(table, keys)
def _table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
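    Examples
    --------
    A minimal sketch; in practice this function is reached through
    ``Table.group_by``::
        t = Table({'a': [1, 2, 1], 'b': [4, 5, 6]})
        tg = t.group_by('a')   # two groups, keyed on the values of 'a'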
"""
from .serialize import represent_mixins_as_columns
from .table import Table
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, str):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError(f"Table does not have key column {name!r}")
if table.masked and np.any(table[name].mask):
raise ValueError(
f"Missing values in key column {name!r} are not allowed"
)
# Make a column slice of the table without copying
table_keys = table.__class__([table[key] for key in keys], copy=False)
# If available get a pre-existing index for these columns
table_index = get_index_by_names(table, keys)
grouped_by_table_cols = True
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError(
"Input keys array length {} does not match table length {}".format(
len(table_keys), len(table)
)
)
table_index = None
grouped_by_table_cols = False
else:
raise TypeError(
"Keys input must be string, list, tuple, Table or numpy array, but got {}".format(
type(keys)
)
)
# TODO: don't use represent_mixins_as_columns here, but instead ensure that
# keys_sort.argsort(kind="stable") works for all columns (including mixins).
# If there is not already an available index and table_keys is a Table then ensure
    # that all cols (including mixins) are in a form that can be sorted with the code below.
if not table_index and isinstance(table_keys, Table):
table_keys_sort = represent_mixins_as_columns(table_keys)
else:
table_keys_sort = table_keys
# Get the argsort index `idx_sort`, accounting for particulars
try:
# take advantage of index internal sort if possible
if table_index is not None:
idx_sort = table_index.sorted_data()
else:
idx_sort = table_keys_sort.argsort(kind="stable")
stable_sort = True
except TypeError:
# TODO: is this still needed?
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys_sort.argsort()
stable_sort = platform.system() not in ("Darwin", "Windows")
# Finally do the actual sort of table_keys values
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in zip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta["grouped_by_table_cols"] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``.
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
"""
from .serialize import represent_mixins_as_columns
from .table import Table
# TODO: don't use represent_mixins_as_columns here, but instead ensure that
# keys_sort.argsort(kind="stable") works for all columns (including mixins).
if isinstance(keys, Table):
keys_sort = represent_mixins_as_columns(keys)
else:
keys_sort = keys
if len(keys_sort) != len(column):
raise ValueError(
"Input keys array length {} does not match column length {}".format(
len(keys), len(column)
)
)
try:
idx_sort = keys_sort.argsort(kind="stable")
except AttributeError:
raise TypeError(
f"keys input ({keys.__class__.__name__}) must have an `argsort` method"
)
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups:
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
"""
@property
def parent(self):
return (
self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
)
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, (int, np.integer)):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception as err:
raise TypeError(
"Index item for groups attribute must be a slice, "
"numpy mask or int array"
) from err
mask = np.zeros(len(parent), dtype=bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in zip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return f"<{self.__class__.__name__} indices={self.indices}>"
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.info.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
from .column import MaskedColumn
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, "reduceat")
sum_case = func is np.sum
mean_case = func is np.mean
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0:i1]) for i0, i1 in zip(i0s, i1s)])
out = par_col.__class__(vals)
except Exception as err:
raise TypeError(
"Cannot aggregate column '{}' with type '{}': {}".format(
par_col.info.name, par_col.info.dtype, err
)
) from err
out_info = out.info
for attr in ("name", "unit", "format", "description", "meta"):
try:
setattr(out_info, attr, getattr(par_col.info, attr))
except AttributeError:
pass
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
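        and then, assuming ``col`` is a grouped column (a minimal sketch),
        apply it with::
            col_pos = col.groups.filter(all_positive)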
Parameters
----------
func : function
Filter function
Returns
-------
out : Column
            New column containing only the groups for which ``func`` returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, "meta", {}).get(
"grouped_by_table_cols", False
)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
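        Examples
        --------
        A minimal sketch, assuming ``tg = t.group_by('a')``::
            summed = tg.groups.aggregate(np.sum)   # one output row per group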
"""
i0s = self.indices[:-1]
out_cols = []
parent_table = self.parent_table
for col in parent_table.columns.values():
# For key columns just pick off first in each group since they are identical
if col.info.name in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.info.groups.aggregate(func)
except TypeError as err:
warnings.warn(str(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
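        and then, assuming ``tg`` is a grouped table (a minimal sketch),
        apply it with::
            tg_pos = tg.groups.filter(all_positive)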
Parameters
----------
func : function
Filter function
Returns
-------
out : Table
            New table containing only the groups for which ``func`` returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import sys
import types
import warnings
import weakref
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy import log
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.units import Quantity, QuantityInfo
from astropy.utils import ShapedLikeNDArray, isiterable
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaAttribute, MetaData
from . import conf, groups
from .column import (
BaseColumn,
Column,
FalseArray,
MaskedColumn,
_auto_names,
_convert_sequence_data_to_array,
col_copy,
)
from .connect import TableRead, TableWrite
from .index import (
Index,
SlicedIndex,
TableILoc,
TableIndices,
TableLoc,
TableLocIndices,
_IndexModeContext,
get_index,
)
from .info import TableInfo
from .mixins.registry import get_mixin_handler
from .ndarray_mixin import NdarrayMixin # noqa: F401
from .pprint import TableFormatter
from .row import Row
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = [
"Table.read",
"Table.write",
"Table._read",
"Table.convert_bytestring_to_unicode",
"Table.convert_unicode_to_bytestring",
]
__doctest_requires__ = {"*pandas": ["pandas>=1.1"]}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = "O" if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, "shape") else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, "info", None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError(
f"Illegal key or index value for {type(self).__name__} object"
)
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError(
f"Cannot replace column '{item}'. Use Table.replace_column() instead."
)
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
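        Examples
        --------
        A minimal sketch, assuming ``t`` mixes plain and masked columns::
            masked_cols = t.columns.isinstance(MaskedColumn)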
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, "_instance_ref"):
out = f"<{self.__class__.__name__} name={self.name} value={self()}>"
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
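        Examples
        --------
        A minimal sketch, assuming a table ``t`` with columns 'a' and 'b'::
            t.pprint_include_names.add('a')   # only 'a' is shown by pprint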
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist."""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and "__attributes__" not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f"{name} not in {self.name}")
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list."""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
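    Examples
    --------
    A minimal sketch::
        from astropy.table import Table
        t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]},
                  units={'b': 'm'}, descriptions={'b': 'a length'})
        t['c'] = ['x', 'y', 'z']   # add a new column in place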
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
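        Examples
        --------
        A minimal sketch::
            t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
            arr = t.as_array()                # structured ndarray with fields 'a' and 'b'
            arr_a = t.as_array(names=['a'])   # restrict to the 'a' field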
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder("=")
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For masked out, masked mixin columns need to set output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"):
data[col.info.name].mask = col.mask
return data
def __init__(
self,
data=None,
masked=False,
names=None,
dtype=None,
meta=None,
copy=True,
rows=None,
copy_indices=True,
units=None,
descriptions=None,
**kwargs,
):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError("Cannot specify dtype when copy=False")
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError("Cannot supply both `data` and `rows` values")
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, "__astropy_table__"):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError(
f"__init__() got unexpected keyword argument {list(kwargs.keys())[0]!r}"
)
if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names:
data = None
if isinstance(data, self.Row):
data = data._table[data._index : data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (
names_from_list_of_dict or _get_names_from_list_of_dict(data)
)
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError("Can not initialize a Table with a scalar")
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError(
"dtype was specified but could not be "
"parsed for column names"
)
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f"Data type {type(data)} not allowed to init Table")
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute("unit", units)
self._set_column_attribute("description", descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(
f"sequence of {attr} values must match number of columns"
)
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(
f"invalid column name {name} for setting {attr} attribute"
)
# Special case: ignore unit if it is an empty or blank string
if attr == "unit" and isinstance(value, str):
if value.strip() == "":
value = None
if value not in (np.ma.masked, None):
col = self[name]
if attr == "unit" and isinstance(col, Quantity):
# Update the Quantity unit in-place
col <<= value
else:
setattr(col.info, attr, value)
def __getstate__(self):
columns = OrderedDict(
(key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items()
)
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table(
[
getattr(col, "mask", FalseArray(col.shape))
for col in self.itercols()
],
names=self.colnames,
copy=False,
)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property.
"""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
        fill_value : object, optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
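        Examples
        --------
        A minimal sketch, assuming ``t`` contains masked entries::
            t_filled = t.filled(-999)   # every masked value replaced by -999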
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [
col.filled(fill_value) if hasattr(col, "filled") else col
for col in self.itercols()
]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
"""
Return the indices associated with columns of the table
as a TableIndices object.
"""
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum(index is x for x in lst) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
"""
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
"""
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
"""
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
"""
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
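        Examples
        --------
        A minimal sketch::
            t = Table({'a': [3, 1, 2], 'b': ['x', 'y', 'z']})
            t.add_index('a')   # 'a' becomes the primary table index
            row = t.loc[2]     # value-based row lookup through the index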
"""
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, "_supports_indexing", False):
raise ValueError(
'Cannot create an index on column "{}", of type "{}"'.format(
col.info.name, type(col)
)
)
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
def remove_indices(self, colname):
"""
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
"""
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
"""
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
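        Examples
        --------
        A minimal sketch, assuming ``t`` is an indexed table::
            with t.index_mode('freeze'):
                t['a'][0] = 100   # index updates are deferred until the block exits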
"""
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
if np.dtype(dtype) != object:
raise ValueError("Datatype coercion is not allowed")
out = np.array(None, dtype=object)
out[()] = self
return out
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, "dtype"), (names, "names")):
if not isiterable(inp_list):
raise ValueError(f"{inp_str} must be a list or None")
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
        # ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dt in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dt, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(
self, data, copy=True, default_name=None, dtype=None, name=None
):
"""
Convert any allowed sequence data ``col`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
            name or data.info.name or default_name
        If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (
original_data.__class__.__module__
+ "."
+ original_data.__class__.__name__
)
raise TypeError(
"Mixin handler for object of type "
f"{fully_qualified_name} "
"did not return a valid mixin column"
)
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif "info" in getattr(data, "__dict__", ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute. If not copying, take a slice
# to ensure we get a new instance and we do not share metadata
# like info.
col = col_copy(data, copy_indices=self._init_indices) if copy else data[:]
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, "dtype"):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = (
masked_col_cls
if isinstance(data, np.ma.MaskedArray)
else self.ColumnClass
)
else:
col_cls = self.ColumnClass
try:
col = col_cls(
name=name,
data=data,
dtype=dtype,
copy=copy,
copy_indices=self._init_indices,
)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError("unable to convert data to Column for Table")
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array."""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = (
[data[name] for name in data_names]
if struct
else [data[:, i] for i in range(n_cols)]
)
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns."""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects."""
lengths = {len(col) for col in cols}
if len(lengths) > 1:
raise ValueError(f"Inconsistent data column lengths: {lengths}")
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(
table, newcols, verify=False, names=self.columns.keys()
)
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
# case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError("Cannot have None for column name")
if len(set(names)) != len(names):
raise ValueError("Duplicate column names")
table.columns = table.TableColumns(
(name, col) for name, col in zip(names, cols)
)
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, "mask"):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(
self,
html=False,
descr_vals=None,
max_width=None,
tableid=None,
show_dtype=True,
max_lines=None,
tableclass=None,
):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append(f"length={len(self)}")
descr = " ".join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f"<i>{xml_escape(descr)}</i>\n"
else:
descr = f"<{descr}>\n"
if tableid is None:
tableid = f"table{id(self)}"
data_lines, outs = self.formatter._pformat_table(
self,
tableid=tableid,
html=html,
max_width=max_width,
show_name=True,
show_unit=None,
show_dtype=show_dtype,
max_lines=max_lines,
tableclass=tableclass,
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(
html=True, max_width=-1, tableclass=conf.default_notebook_table_class
)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f"<div>{out}</div>"
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return "\n".join(self.pformat())
def __bytes__(self):
return str(self).encode("utf-8")
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
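Examples
--------
A minimal illustration; creating a table with ``masked=True`` makes every
data column a ``MaskedColumn``, whether or not any values are actually masked::
>>> t = Table({'a': [1, 2]}, masked=True)
>>> t.has_masked_columns
True
>>> Table({'a': [1, 2]}).has_masked_columns
False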
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
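Examples
--------
A minimal illustration; a masked table starts with no masked values and
this property only becomes `True` once an element is actually masked::
>>> t = Table({'a': [1.0, 2.0]}, masked=True)
>>> t.has_masked_values
False
>>> t['a'].mask = [False, True]
>>> t.has_masked_values
True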
"""
return any(hasattr(col, "mask") and np.any(col.mask) for col in self.itercols())
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
align=None,
):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(
max_lines, max_width, show_name, show_unit, show_dtype, align
)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()), copy=False)
else:
return self
def show_in_notebook(
self,
tableid=None,
css=None,
display_length=50,
table_class="astropy-default",
show_row_index="idx",
):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from IPython.display import HTML
from .jsviewer import JSViewer
if tableid is None:
tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}"
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == "astropy-default":
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(
html=True,
max_width=-1,
tableid=tableid,
max_lines=-1,
show_dtype=False,
tableclass=table_class,
)
columns = display_table.columns.values()
sortable_columns = [
i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc"
]
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(
self,
max_lines=5000,
jsviewer=False,
browser="default",
jskwargs={"use_local_files": True},
tableid=None,
table_class="display compact",
css=None,
show_row_index="idx",
):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
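Examples
--------
A usage sketch only; the call opens a web browser so it is not executed
as a doctest::
>>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})  # doctest: +SKIP
>>> t.show_in_browser(jsviewer=True)  # doctest: +SKIP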
"""
import os
import tempfile
import webbrowser
from urllib.parse import urljoin
from urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "table.html")
with open(path, "w") as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(
tmp,
format="jsviewer",
css=css,
max_lines=max_lines,
jskwargs=jskwargs,
table_id=tableid,
table_class=table_class,
)
else:
self.write(tmp, format="html")
try:
br = webbrowser.get(None if browser == "default" else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin("file:", pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
tableid=tableid,
tableclass=tableclass,
align=align,
)
if outs["show_length"]:
lines.append(f"Length = {len(self)} rows")
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(
self,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
align=None,
tableclass=None,
):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(
max_lines,
max_width,
show_name,
show_unit,
show_dtype,
html,
tableid,
align,
tableclass,
)
def more(
self,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
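Examples
--------
A usage sketch only; paging is interactive so the call is not executed
as a doctest::
>>> t = Table({'a': range(100)})  # doctest: +SKIP
>>> t.more(max_lines=10)  # doctest: +SKIP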
"""
self.formatter._more_tabcol(
self,
max_lines,
max_width,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (
isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i"
):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__(
[self[x] for x in item], copy_indices=self._copy_indices
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif (isinstance(item, np.ndarray) and item.size == 0) or (
isinstance(item, (tuple, list)) and not item
):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
# Handle the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)].
# For all of these a new table is constructed with a slice of all columns.
return self._new_from_slice(item)
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (
not getattr(self, "_setitem_inplace", False)
and not conf.replace_inplace
):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (
isinstance(item, (slice, np.ndarray, list))
or isinstance(item, tuple)
and all(isinstance(x, np.ndarray) for x in item)
):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError(
"Right side value needs {} elements (one for each column)".format(
n_cols
)
)
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f"Illegal type {type(item)} for table item access")
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif isinstance(item, (list, tuple, np.ndarray)) and all(
isinstance(x, str) for x in item
):
self.remove_columns(item)
elif (
isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i"
):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError("illegal key or index value")
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception(
"Masked attribute is read-only (use t = Table(t, masked=True)"
" to convert to a masked table)"
)
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings."""
return (
isinstance(names, (tuple, list))
and names
and all(isinstance(x, str) for x in names)
)
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def __or__(self, other):
if isinstance(other, Table):
updated_table = self.copy()
updated_table.update(other)
return updated_table
else:
return NotImplemented
def __ior__(self, other):
try:
self.update(other)
return self
except TypeError:
return NotImplemented
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(
self,
col,
index=None,
name=None,
rename_duplicate=False,
copy=True,
default_name=None,
):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
in the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f"col{len(self.columns)}"
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(
col, name=name, copy=copy, default_name=default_name
)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError("Empty table cannot have column set to scalar value")
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, "shape", ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape, subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape, subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError("Inconsistent data column lengths")
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + "_" + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(
self, cols, indexes=None, names=None, copy=True, rename_duplicate=False
):
"""
Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError("Number of indexes must match number of cols")
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError("Number of names must match number of cols")
default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes, kind="stable")):
self.add_column(
cols[ii],
index=indexes[ii],
name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate,
copy=copy,
)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
# sys.getrefcount is CPython specific and not on PyPy.
if (
"refcount" in warns
and name in self.colnames
and hasattr(sys, "getrefcount")
):
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if "always" in warns:
warnings.warn(
f"replaced column '{name}'", TableReplaceWarning, stacklevel=3
)
if "slice" in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = (
"replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
# sys.getrefcount is CPython specific and not on PyPy.
if "refcount" in warns and hasattr(sys, "getrefcount"):
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = (
"replaced column '{}' and the number of references "
"to the column changed.".format(name)
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if "attributes" in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = "replaced column '{}' and column attributes {} changed.".format(
name, changed_attrs
)
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
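With ``copy=False`` the table references the input column data instead of
copying it, so later in-place changes to ``float_a`` would also be seen in
the table (shown here as a sketch of the call only)::
>>> t.replace_column('a', float_a, copy=False)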
"""
if name not in self.colnames:
raise ValueError(f"column name {name} is not in the table")
if self[name].info.indices:
raise ValueError("cannot replace a table index column")
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError("length of new column must match table length")
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
`~astropy.table.Row` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (default to all columns if no names provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f"{name} is not a valid column name")
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f"columns {invalid_names} do not exist")
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
"""
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
"""
for name in self._set_of_names_in_colnames(names):
del self.columns[name]
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
encode_decode_func : callable
Function applied to encode or decode the column data when a direct
dtype cast fails (e.g. ``np.char.encode`` or ``np.char.decode``).
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, "utf-8"))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in (
col.info.attr_names - col.info._attrs_no_copy - {"dtype"}
):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
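Examples
--------
A minimal illustration showing the column dtype kind changing from
``'S'`` (bytes) to ``'U'`` (unicode)::
>>> t = Table({'a': [b'x', b'y']})
>>> t['a'].dtype.kind
'S'
>>> t.convert_bytestring_to_unicode()
>>> t['a'].dtype.kind
'U'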
"""
self._convert_string_dtype("S", "U", np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype("U", "S", np.char.encode)
def keep_columns(self, names):
"""
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
"""
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
del self.columns[colname]
def rename_column(self, name, new_name):
"""
Rename a column.
This can also be done directly by setting the ``name`` attribute
of the ``info`` property of the column::
table[name].info.name = new_name
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
"""
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
"""
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
"""
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError(
"input 'new_names' must be a tuple or a list of column names"
)
if len(names) != len(new_names):
raise ValueError(
"input 'names' and 'new_names' list arguments must be the same length"
)
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError(
"right hand side must be a sequence of values with "
"the same length as the number of selected columns"
)
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
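Examples
--------
A minimal illustration, inserting a row before index 1 with the values
given as a mapping of column name to value::
>>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
>>> t.insert_row(1, {'a': 3, 'b': 6})
>>> t['a'].tolist()
[1, 3, 2]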
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError(
f"Index {index} is out of bounds for table with length {N}"
)
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError("keys in mask should match keys in vals")
if vals and any(name not in colnames for name in vals):
raise ValueError("Keys in vals must all be valid column names")
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, "dtype"):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError("Mismatch between number of vals and columns")
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError("Mismatch between number of masks and columns")
else:
mask = [False] * len(self.columns)
else:
raise TypeError("Vals must be an iterable or mapping or None")
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if (
mask_
and isinstance(col, Column)
and not isinstance(col, MaskedColumn)
):
col_cls = (
self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn
)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError(
"Incorrect length for column {} after inserting {}"
" (expected {}, got {})".format(name, val, len(newcol), N + 1)
)
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, "mask"):
newcol[index] = np.ma.masked
else:
raise TypeError(
"mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name)
)
columns[name] = newcol
except Exception as err:
raise ValueError(
"Unable to insert row because of exception in column '{}':\n{}".format(
name, err
)
) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, "_groups"):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``. If ``other`` is a
|Table| instance then ``|=`` is available as alternate syntax for in-place
update and ``|`` can be used to merge data into a new table.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
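The ``|`` operator mentioned above performs the same merge but returns a
new table instead of updating in place (a minimal illustration)::
>>> t1 = Table({'a': [1, 2]})
>>> t2 = Table({'b': [3.0, 4.0]})
>>> (t1 | t2).colnames
['a', 'b']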
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts="silent")
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
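Examples
--------
A minimal illustration, sorting on a single column and converting the
resulting index array to a plain list::
>>> t = Table({'a': [3, 1, 2]})
>>> t.argsort('a').tolist()
[1, 2, 0]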
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs["order"] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs["kind"] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode("freeze"):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int, dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
Columns not listed in the dict are left unchanged.
"""
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
"""
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
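Examples
--------
A minimal sketch added for illustration (the column values here are
made up)::
>>> t = Table({'a': [1, 2, 3]})
>>> t2 = t.copy()                  # fully independent copy
>>> t3 = t.copy(copy_data=False)   # shares the underlying data array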
"""
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, "_groups"):
out._groups = groups.TableGroups(
out, indices=self._groups._indices, keys=self._groups._keys
)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is actual implementation for __eq__.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with other::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with other::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
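Comparing against a scalar broadcasts it to every column; a short
sketch added for illustration (output omitted)::
>>> eq2 = t1.values_equal(2)  # each column compared against 2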
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError("cannot compare tables with different column names")
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
eq = self[name] == other[name]
if (
warns
and issubclass(warns[-1].category, FutureWarning)
and "elementwise comparison failed" in str(warns[-1].message)
):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f"unable to compare column {name}") from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (
isinstance(eq, np.ndarray)
and eq.dtype is np.dtype("bool")
and len(eq) == len(self)
):
raise TypeError(
f"comparison for column {name} returned {eq} "
"instead of the expected boolean ndarray"
)
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``.
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
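Examples
--------
An illustrative sketch added for clarity (the column names and values
are made up)::
>>> t = Table({'a': [1, 2, 1], 'b': [10, 20, 30]})
>>> tg = t.group_by('a')
>>> len(tg.groups)
2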
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance.
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError(
"index must be None, False, True or a table column name"
)
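# At this point ``index`` is either False or the name of a single table
# column (e.g. index=None with a one-column primary key resolves to that
# column's name).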
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.time import TimeBase, TimeDelta
from . import serialize
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = (
col_copy(col, copy_indices=False) if col.info.indices else col
)
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype("timedelta64[ns]")
nat = np.timedelta64("NaT")
else:
new_col = col.datetime64.copy()
nat = np.datetime64("NaT")
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
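# Encode any mixin columns (e.g. SkyCoord becomes plain ra/dec columns,
# Time/TimeDelta become datetime64/timedelta64) so the conversion below
# only has to deal with Column and MaskedColumn objects.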
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
# fmt: off
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)'
)
# fmt: on
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, "isnative", True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder("=")
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ["i", "u"]:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace("i", "I").replace("u", "U")
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to"
f" {out[name].dtype}",
TableReplaceWarning,
stacklevel=3,
)
elif column.dtype.kind not in ["f", "c"]:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs["index"] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance.
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
units : dict
A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or "index"
while index_name in names:
index_name = "_" + index_name + "_"
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f"`units` contains additional columns: {not_found}")
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ["u", "i"] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(
data=data, name=name, mask=mask, unit=unit, copy=False
)
continue
if data.dtype.kind == "O":
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b""
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array(list(data))
# Numpy datetime64
if data.dtype.kind == "M":
from astropy.time import Time
out[name] = Time(data, format="datetime64")
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = "isot"
# Numpy timedelta64
elif data.dtype.kind == "m":
from astropy.time import TimeDelta
data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format="sec")
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
For more information see:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
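Examples
--------
A minimal sketch of the Table/QTable difference, added for
illustration (the values are made up)::
>>> import astropy.units as u
>>> t = QTable()
>>> t['v'] = [1.0, 2.0] * u.m / u.s
>>> isinstance(t['v'], u.Quantity)
True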
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, "unit", None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(
f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning,
)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
|
70ee8ac78ab97b628b1ae9099a51598e5625e32dc14d6a2499c728f07b2362a0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.config as _config
from astropy.utils.compat import optional_deps
from .column import Column, ColumnInfo, MaskedColumn, StringTruncateWarning
__all__ = [
"BST",
"Column",
"ColumnGroups",
"ColumnInfo",
"Conf",
"JSViewer",
"MaskedColumn",
"NdarrayMixin",
"QTable",
"Row",
"SCEngine",
"SerializedColumn",
"SortedArray",
"StringTruncateWarning",
"Table",
"TableAttribute",
"TableColumns",
"TableFormatter",
"TableGroups",
"TableMergeError",
"TableReplaceWarning",
"conf",
"connect",
"hstack",
"join",
"registry",
"represent_mixins_as_columns",
"setdiff",
"unique",
"vstack",
"dstack",
"conf",
"join_skycoord",
"join_distance",
"PprintIncludeExclude",
]
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table`.
"""
auto_colname = _config.ConfigItem(
"col{0}",
"The template that determines the name of a column if it cannot be "
"determined. Uses new-style (format method) string formatting.",
aliases=["astropy.table.column.auto_colname"],
)
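# For example, with the default template "col{0}" unnamed columns are
# auto-named "col0", "col1", ... (see _auto_names in column.py).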
default_notebook_table_class = _config.ConfigItem(
"table-striped table-bordered table-condensed",
"The table class to be used in Jupyter notebooks when displaying "
"tables (and not overridden). See <https://getbootstrap.com/css/#tables "
"for a list of useful bootstrap classes.",
)
replace_warnings = _config.ConfigItem(
[],
"List of conditions for issuing a warning when replacing a table "
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
"string_list",
)
replace_inplace = _config.ConfigItem(
False,
"Always use in-place update of a table column when using setitem, "
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases.",
)
conf = Conf()
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry
from . import connect
from .bst import BST
from .groups import ColumnGroups, TableGroups
from .operations import (
TableMergeError,
dstack,
hstack,
join,
join_distance,
join_skycoord,
setdiff,
unique,
vstack,
)
from .serialize import SerializedColumn, represent_mixins_as_columns
from .soco import SCEngine
from .sorted_array import SortedArray
from .table import (
NdarrayMixin,
PprintIncludeExclude,
QTable,
Row,
Table,
TableAttribute,
TableColumns,
TableFormatter,
TableReplaceWarning,
)
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
import astropy.io.ascii.connect
import astropy.io.fits.connect
import astropy.io.misc.connect
import astropy.io.misc.pandas.connect
import astropy.io.votable.connect
from .jsviewer import JSViewer
if optional_deps.HAS_ASDF_ASTROPY:
import asdf_astropy.io.connect
|
2dd6ba656c766994130af5c04d95a05d68bcbe5d2305a4f8fa68722b2acd39d4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import warnings
import weakref
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy.units import Quantity, StructuredUnit, Unit
from astropy.utils.console import color_print
from astropy.utils.data_info import BaseColumnInfo, dtype_info_name
from astropy.utils.metadata import MetaData
from astropy.utils.misc import dtype_bytes_or_chars
from . import groups, pprint
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter("always", StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = {
np.greater,
np.greater_equal,
np.less,
np.less_equal,
np.not_equal,
np.equal,
np.isfinite,
np.isinf,
np.isnan,
np.sign,
np.signbit,
}
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
"""
if isinstance(col, BaseColumn):
return col.copy()
newcol = col.copy() if hasattr(col, "copy") else deepcopy(col)
# If the column has info defined, we copy it and adjust any indices
# to point to the copied column. By guarding with the if statement,
# we avoid side effects (of creating the default info instance).
if "info" in col.__dict__:
newcol.info = col.info
if copy_indices and col.info.indices:
newcol.info.indices = deepcopy(col.info.indices)
for index in newcol.info.indices:
index.replace_col(col, newcol)
return newcol
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
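Examples
--------
A short sketch of the intended behavior, added for illustration::
>>> mask = FalseArray((3,))
>>> bool(mask.any())
False
>>> mask[0] = True
Traceback (most recent call last):
...
ValueError: Cannot set any element of FalseArray class to True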
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError(
f"Cannot set any element of {type(self).__name__} class to True"
)
def _expand_string_array_for_values(arr, values):
"""
For string-dtype return a version of ``arr`` that is wide enough for ``values``.
If ``arr`` is not string-dtype or does not need expansion then return ``arr``.
Parameters
----------
arr : np.ndarray
Input array
values : scalar or array-like
Values for width comparison for string arrays
Returns
-------
arr_expanded : np.ndarray
"""
if arr.dtype.kind in ("U", "S") and values is not np.ma.masked:
# Find the length of the longest string in the new values.
values_str_len = np.char.str_len(values).max()
# Determine character repeat count of arr.dtype. Returns a positive
# int or None (something like 'U0' is not possible in numpy). If new values
# are longer than current then make a new (wider) version of arr.
arr_str_len = dtype_bytes_or_chars(arr.dtype)
if arr_str_len and values_str_len > arr_str_len:
arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len)
arr = arr.astype(arr_dtype)
return arr
def _convert_sequence_data_to_array(data, dtype=None):
"""Convert N-d sequence-like data to ndarray or MaskedArray.
This is the core function for converting Python lists or list of lists to a
numpy array. This handles embedded np.ma.masked constants in ``data`` along
with the special case of an homogeneous list of MaskedArray elements.
Considerations:
- np.ma.array is about 50 times slower than np.array for list input. This
function avoids using np.ma.array on list input.
- np.array emits a UserWarning for embedded np.ma.masked, but only for int
or float inputs. For those it converts to np.nan and forces float dtype.
For other types np.array is inconsistent, for instance converting
np.ma.masked to "0.0" for str types.
- Searching in pure Python for np.ma.masked in ``data`` is comparable in
speed to calling ``np.array(data)``.
- This function may end up making two additional copies of input ``data``.
Parameters
----------
data : N-d sequence
Input data, typically list or list of lists
dtype : None or dtype-like
Output datatype (None lets np.array choose)
Returns
-------
np_data : np.ndarray or np.ma.MaskedArray
"""
np_ma_masked = np.ma.masked # Avoid repeated lookups of this object
# Special case of an homogeneous list of MaskedArray elements (see #8977).
# np.ma.masked is an instance of MaskedArray, so exclude those values.
if (
hasattr(data, "__len__")
and len(data) > 0
and all(
isinstance(val, np.ma.MaskedArray) and val is not np_ma_masked
for val in data
)
):
np_data = np.ma.array(data, dtype=dtype)
return np_data
# First convert data to a plain ndarray. If there are instances of np.ma.masked
# in the data this will issue a warning for int and float.
with warnings.catch_warnings(record=True) as warns:
# Ensure this warning from numpy is always enabled and that it is not
# converted to an error (which can happen during pytest).
warnings.filterwarnings(
"always", category=UserWarning, message=".*converting a masked element.*"
)
# FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291
# and https://github.com/numpy/numpy/issues/18425.
warnings.filterwarnings(
"always",
category=FutureWarning,
message=".*Promotion of numbers and bools to strings.*",
)
try:
np_data = np.array(data, dtype=dtype)
except np.ma.MaskError:
# Catches case of dtype=int with masked values, instead let it
# convert to float
np_data = np.array(data)
except Exception:
# Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity.
# First try to interpret the data as Quantity. If that still fails then fall
# through to object
try:
np_data = Quantity(data, dtype)
except Exception:
dtype = object
np_data = np.array(data, dtype=dtype)
if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0):
# Implies input was a scalar or an empty list (e.g. initializing an
# empty table with pre-declared names and dtypes but no data). Here we
# need to fall through to initializing with the original data=[].
return data
# If there were no warnings and the data are int or float, then we are done.
# Other dtypes like string or complex can have masked values and the
# np.array() conversion gives the wrong answer (e.g. converting np.ma.masked
# to the string "0.0").
if len(warns) == 0 and np_data.dtype.kind in ("i", "f"):
return np_data
# Now we need to determine if there is an np.ma.masked anywhere in input data.
# Make a statement like below to look for np.ma.masked in a nested sequence.
# Because np.array(data) succeeded we know that `data` has a regular N-d
# structure. Find ma_masked:
# any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data)
# Using this eval avoids creating a copy of `data` in the more-usual case of
# no masked elements.
any_statement = "d0 is ma_masked"
for ii in reversed(range(np_data.ndim)):
if ii == 0:
any_statement = f"any({any_statement} for d0 in data)"
elif ii == np_data.ndim - 1:
any_statement = f"any(d{ii} is ma_masked for d{ii} in d{ii-1})"
else:
any_statement = f"any({any_statement} for d{ii} in d{ii-1})"
context = {"ma_masked": np.ma.masked, "data": data}
has_masked = eval(any_statement, context)
# If there are any masks then explicitly change each one to a fill value and
# set a mask boolean array. If not has_masked then we're done.
if has_masked:
mask = np.zeros(np_data.shape, dtype=bool)
data_filled = np.array(data, dtype=object)
# Make type-appropriate fill value based on initial conversion.
if np_data.dtype.kind == "U":
fill = ""
elif np_data.dtype.kind == "S":
fill = b""
else:
# Zero works for every numeric type.
fill = 0
ranges = [range(dim) for dim in np_data.shape]
for idxs in itertools.product(*ranges):
val = data_filled[idxs]
if val is np_ma_masked:
data_filled[idxs] = fill
mask[idxs] = True
elif isinstance(val, bool) and dtype is None:
# If we see a bool and dtype not specified then assume bool for
# the entire array. Not perfect but in most practical cases OK.
# Unfortunately numpy types [False, 0] as int, not bool (and
# [False, np.ma.masked] => array([0.0, np.nan])).
dtype = bool
# If no dtype is provided then need to convert back to list so np.array
# does type autodetection.
if dtype is None:
data_filled = data_filled.tolist()
# Use np.array first to convert `data` to ndarray (fast) and then make
# masked array from an ndarray with mask (fast) instead of from `data`.
np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask)
return np_data
def _make_compare(oper):
"""
Make Column comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
Parameters
----------
oper : str
Operator name
"""
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# If other is a Quantity, we should let it do the work, since
# it can deal with our possible unit (which, for MaskedColumn,
# would get dropped below, as '.data' is accessed in super()).
if isinstance(other, Quantity):
return NotImplemented
# If we are unicode and other is a column with bytes, defer to it for
# doing the unicode sandwich. This avoids problems like those
# discussed in #6838 and #6899.
if (
self.dtype.kind == "U"
and isinstance(other, Column)
and other.dtype.kind == "S"
):
return NotImplemented
# If we are bytes, encode other as needed.
if self.dtype.char == "S":
other = self._encode_str(other)
# Now just let the regular ndarray.__eq__, etc., take over.
result = getattr(super(Column, self), op)(other)
# But we should not return Column instances for this case.
return result.data if isinstance(result, Column) else result
return _compare
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attr_names = BaseColumnInfo.attr_names | {"groups"}
_attrs_no_copy = BaseColumnInfo._attrs_no_copy | {"groups"}
attrs_from_parent = attr_names
_supports_indexing = True
# For structured columns, data is used to store a dict of columns.
# Store entries in that dict as name.key instead of name.data.key.
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
result = super()._represent_as_dict()
names = self._parent.dtype.names
# For a regular column, we are done, but for a structured
# column, we use a SerializedColumn to store the pieces.
if names is None:
return result
from .serialize import SerializedColumn
data = SerializedColumn()
# If this column has a StructuredUnit, we split it and store
# it on the corresponding part. Otherwise, we just store it
# as an attribute below. All other attributes we remove from
# the parts, so that we do not store them multiple times.
# (Note that attributes are not linked to the parent, so it
# is safe to reset them.)
# TODO: deal with (some of) this in Column.__getitem__?
# Alternatively: should we store info on the first part?
# TODO: special-case format somehow? Can we have good formats
# for structured columns?
unit = self.unit
if isinstance(unit, StructuredUnit) and len(unit) == len(names):
units = unit.values()
unit = None # No need to store as an attribute as well.
else:
units = [None] * len(names)
for name, part_unit in zip(names, units):
part = Column(self._parent[name])
part.unit = part_unit
part.description = None
part.meta = {}
part.format = None
data[name] = part
# Create the attributes required to reconstruct the column.
result["data"] = data
# Store the shape if needed. Just like scalar data, a structured data
# column (e.g. with dtype `f8,i8`) can be multidimensional within each
# row and have a shape, and that needs to be distinguished from the
# case that each entry in the structure has the same shape (e.g.,
# distinguish a column with dtype='f8,i8' and 2 elements per row from
# one with dtype '2f8,2i8' and just one element per row).
if shape := self._parent.shape[1:]:
result["shape"] = list(shape)
# Also store the standard info attributes since these are
# stored on the parent and can thus just be passed on as
# arguments. TODO: factor out with essentially the same
# code in serialize._represent_mixin_as_column.
if unit is not None and unit != "":
result["unit"] = unit
if self.format is not None:
result["format"] = self.format
if self.description is not None:
result["description"] = self.description
if self.meta:
result["meta"] = self.meta
return result
def _construct_from_dict(self, map):
if not isinstance(map.get("data"), dict):
return super()._construct_from_dict(map)
# Reconstruct a structured Column, by first making an empty column
# and then filling it with the structured data.
data = map.pop("data")
shape = tuple(map.pop("shape", ()))
# There are three elements in the shape of `part`:
# (table length, shape of structured column, shape of part like '3f8')
# The column `shape` only includes the second, so by adding one to its
# length to include the table length, we pick off a possible last bit.
dtype = np.dtype(
[
(name, part.dtype, part.shape[len(shape) + 1 :])
for name, part in data.items()
]
)
units = tuple(col.info.unit for col in data.values())
if all(unit is not None for unit in units):
map["unit"] = StructuredUnit(units, dtype)
map.update(dtype=dtype, shape=shape, length=len(data[dtype.names[0]]))
# Construct the empty column from `map` (note: 'data' removed above).
result = super()._construct_from_dict(map)
# Fill it with the structured data.
for name in dtype.names:
result[name] = data[name]
return result
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "unit", "format", "description")
)
return self._parent_cls(length=length, **attrs)
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Column this is just the column itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(
cls,
data=None,
name=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=False,
copy_indices=True,
):
if data is None:
self_data = np.zeros((length,) + shape, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, "_name"):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = Quantity(data, unit, dtype=dtype, copy=copy).value
# If 'info' has been defined, copy basic properties (if needed).
if "info" in data.__dict__:
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
else:
if np.dtype(dtype).char == "S":
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = None if name is None else str(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, "indices", [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def value(self):
"""
An alias for the existing ``data`` attribute.
"""
return self.data
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
# such after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, "_parent_table", None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order="C", data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ("_name", "_unit", "_format", "description", "meta", "indices")
attrs = dict(zip(names, state[-1]))
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (
self.name,
self.unit,
self.format,
self.description,
self.meta,
self.indices,
)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, "indices"): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
if "info" in getattr(obj, "__dict__", {}):
self.info = obj.info
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar, so
the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if self.shape != out_arr.shape or (
isinstance(out_arr, BaseColumn)
and (context is not None and context[0] in _comparison_functions)
):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
if val is not None:
val = str(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, "_format", None)
self._format = format_string # set new format string
try:
# test whether it formats without error exemplarily
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{}': could not display "
"values in this column using this format".format(self.name)
) from err
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
yield from _pformat_col_iter(
self, -1, show_name=False, show_unit=False, show_dtype=False, outs={}
)
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : bool
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError("Comparison `col` must be a Column or MaskedColumn object")
attrs = ("name", "unit", "dtype", "format", "description", "meta")
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(
self,
max_lines=None,
show_name=True,
show_unit=False,
show_dtype=False,
html=False,
):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(
self,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
html=html,
)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(
self,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
n_header = outs["n_header"]
for i, line in enumerate(lines):
if i < n_header:
color_print(line, "red")
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(
self, max_lines=max_lines, show_name=show_name, show_unit=show_unit
)
@property
def unit(self):
"""
The unit associated with this column. May be a string or an
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict="silent")
@unit.deleter
def unit(self):
self._unit = None
def searchsorted(self, v, side="left", sorter=None):
# For bytes type data, encode the `v` value as UTF-8 (if necessary) before
# calling searchsorted. This prevents a factor of 1000 slowdown in
# searchsorted in this case.
a = self.data
if a.dtype.kind == "S" and not isinstance(v, bytes):
v = np.asarray(v)
if v.dtype.kind == "U":
v = np.char.encode(v, "utf-8")
return np.searchsorted(a, v, side=side, sorter=sorter)
searchsorted.__doc__ = np.ndarray.searchsorted.__doc__
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
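Examples
--------
A minimal sketch added for illustration::
>>> col = Column([1000.0, 2000.0], unit='m')
>>> col.convert_unit_to('km')
>>> col.data.tolist()
[1.0, 2.0]
>>> col.unit
Unit("km")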
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, "_groups"):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``.
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``.
"""
if self.parent_table:
if hasattr(self.parent_table, "_groups"):
out._groups = groups.ColumnGroups(
out, indices=self.parent_table._groups._indices
)
elif hasattr(self, "_groups"):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(
self, self.unit, copy=False, dtype=self.dtype, order="A", subok=True
)
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : unit-like
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of tuple
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self.
"""
for attr in ("name", "unit", "_format", "description"):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, "meta", None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode("utf-8")
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == "U":
arr = np.char.encode(arr, encoding="utf-8")
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
def tolist(self):
if self.dtype.kind == "S":
return np.chararray.decode(self, encoding="utf-8").tolist()
else:
return super().tolist()
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ndarray` object, you can use
one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
def __new__(
cls,
data=None,
name=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=False,
copy_indices=True,
):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError(
"Cannot convert a MaskedColumn with masked value to a Column"
)
self = super().__new__(
cls,
data=data,
name=name,
dtype=dtype,
shape=shape,
length=length,
description=description,
unit=unit,
format=format,
meta=meta,
copy=copy,
copy_indices=copy_indices,
)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError(
"cannot set mask value to a column in non-masked Table"
)
super().__setattr__(item, value)
if item == "unit" and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
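# Note (added comment): setting ``unit`` on a numeric column that lives in a
# QTable triggers _convert_col_for_table above, so the column may be replaced
# by an equivalent Quantity in the parent table.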
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (
("name", self.name),
("dtype", dtype_info_name(self.dtype)),
("shape", shape),
("unit", unit),
("format", self.format),
("description", self.description),
("length", len(self)),
):
if val is not None:
descr_vals.append(f"{attr}={val!r}")
descr = "<" + " ".join(descr_vals) + ">\n"
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html
)
out = descr + "\n".join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return "\n".join(lines)
def __bytes__(self):
return str(self).encode("utf-8")
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn(
"truncated right side string(s) longer than {} "
"character(s) during assignment".format(self_str_len),
StringTruncateWarning,
stacklevel=3,
)
def __setitem__(self, index, value):
if self.dtype.char == "S":
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
__eq__ = _make_compare("__eq__")
__ne__ = _make_compare("__ne__")
__gt__ = _make_compare("__gt__")
__lt__ = _make_compare("__lt__")
__ge__ = _make_compare("__ge__")
__le__ = _make_compare("__le__")
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
            A copy of column with ``values`` inserted. Note that the
            insertion does not occur in-place: a new column is returned.
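        Examples
        --------
        A minimal illustrative sketch (values chosen for demonstration only)::
            col = Column([1, 2, 3], name='a')
            col.insert(1, 10)   # -> Column with values [1, 10, 2, 3]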
"""
if self.dtype.kind == "O":
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
self_for_insert = _expand_string_array_for_values(self, values)
data = np.insert(self_for_insert, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
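    # Illustrative sketch only (assumes the public Table/MaskedColumn API): the
    # per-format default above can be overridden on a column before writing::
    #
    #     t = Table({'a': MaskedColumn([1, 2], mask=[True, False])})
    #     t['a'].info.serialize_method['ecsv'] = 'data_mask'
    #     t.write('out.ecsv', format='ascii.ecsv')  # writes 'a' plus 'a.mask'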
def _represent_as_dict(self):
out = super()._represent_as_dict()
# If we are a structured masked column, then our parent class,
# ColumnInfo, will already have set up a dict with masked parts,
# which will be serialized later, so no further work needed here.
if self._parent.dtype.names is not None:
return out
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
# Note: a driver here is a performance issue in #8443 where repr() of a
# np.ma.MaskedArray value is up to 10 times slower than repr of a normal array
# value. So regardless of whether there are masked elements it is useful to
# explicitly define this as a serialized column and use col.data.data (ndarray)
# instead of letting it fall through to the "standard" serialization machinery.
out["data"] = col.data.data
if np.any(col.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = col.mask
elif method == "null_value":
pass
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str, or None
Value used when filling masked column elements
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
    If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ma.MaskedArray` object, you can
use one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
info = MaskedColumnInfo()
def __new__(
cls,
data=None,
name=None,
mask=None,
fill_value=None,
dtype=None,
shape=(),
length=0,
description=None,
unit=None,
format=None,
meta=None,
copy=False,
copy_indices=True,
):
if mask is None:
# If mask is None then we need to determine the mask (if any) from the data.
# The naive method is looking for a mask attribute on data, but this can fail,
# see #8816. Instead use ``MaskedArray`` to do the work.
mask = ma.MaskedArray(data).mask
if mask is np.ma.nomask:
# Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below.
mask = False
elif copy:
mask = mask.copy()
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(
data,
dtype=dtype,
shape=shape,
length=length,
name=name,
unit=unit,
format=format,
description=description,
meta=meta,
copy=copy,
copy_indices=copy_indices,
)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# The above process preserves info relevant for Column, but this does
# not include serialize_method (and possibly other future attributes)
# relevant for MaskedColumn, so we set info explicitly.
if "info" in getattr(data, "__dict__", {}):
self.info = data.info
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None:
data_fill_value = getattr(data, "fill_value", None)
if (
data_fill_value is not None
and data_fill_value != np.ma.default_fill_value(data.dtype)
):
fill_value = np.array(data_fill_value, self.dtype)[()]
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work.
"""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
"""The plain MaskedArray data held by this column."""
out = self.view(np.ma.MaskedArray)
# By default, a MaskedArray view will set the _baseclass to be the
# same as that of our own class, i.e., BaseColumn. Since we want
# to return a plain MaskedArray, we reset the baseclass accordingly.
out._baseclass = np.ndarray
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
        fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
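        Examples
        --------
        A minimal illustrative sketch::
            col = MaskedColumn([1, 2, 3], name='a', mask=[False, True, False])
            col.filled(-1)   # -> Column with values [1, -1, 3]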
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = (
self.parent_table.Column if (self.parent_table is not None) else Column
)
out = column_cls(
name=self.name,
data=data,
unit=self.unit,
format=self.format,
description=self.description,
meta=deepcopy(self.meta),
)
return out
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : bool or array-like
Mask value(s) to insert. If not supplied, and values does not have
a mask either, then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
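        Examples
        --------
        A minimal illustrative sketch::
            col = MaskedColumn([1, 2, 3], name='a', mask=[False, True, False])
            col.insert(1, 10, mask=True)
            # -> values [1, 10, 2, 3] with mask [False, True, True, False]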
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == "O":
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
self_ma = _expand_string_array_for_values(self_ma, values)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
mask = getattr(values, "mask", np.ma.nomask)
if mask is np.ma.nomask:
if self.dtype.kind == "O":
mask = False
else:
mask = np.zeros(np.shape(values), dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
# TODO: this part is essentially the same as what is done in
# __array_finalize__ and could probably be called directly in our
# override of __getitem__ in _columns_mixins.pyx). Refactor?
if "info" in self.__dict__:
out.info = self.info
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == "S":
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(""))
# update indices
self.info.adjust_indices(index, value, len(self))
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
66dc43b7328a5ae239eafe443d463af8c641280cd510cf37160c8600fb0996f1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
__all__ = ["BST"]
class MaxValue:
"""
Represents an infinite value for purposes
of tuple comparison.
"""
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __repr__(self):
return "MAX"
__str__ = __repr__
class MinValue:
"""
The opposite of MaxValue, i.e. a representation of
negative infinity.
"""
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __repr__(self):
return "MIN"
__str__ = __repr__
class Epsilon:
"""
Represents the "next largest" version of a given value,
so that for all valid comparisons we have
x < y < Epsilon(y) < z whenever x < y < z and x, z are
not Epsilon objects.
Parameters
----------
val : object
Original value
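    Examples
    --------
    Behavior sketch (consistent with the comparison methods defined below)::
        eps = Epsilon(5)
        5 < eps    # True: eps sorts just above its own value
        eps < 6    # True: but still below the next distinct value
        eps == 5   # always False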
"""
__slots__ = ("val",)
def __init__(self, val):
self.val = val
def __lt__(self, other):
if self.val == other:
return False
return self.val < other
def __gt__(self, other):
if self.val == other:
return True
return self.val > other
def __eq__(self, other):
return False
def __repr__(self):
return repr(self.val) + " + epsilon"
class Node:
"""
An element in a binary search tree, containing
a key, data, and references to children nodes and
a parent node.
Parameters
----------
key : tuple
Node key
data : list or int
Node data
"""
__lt__ = lambda x, y: x.key < y.key
__le__ = lambda x, y: x.key <= y.key
__eq__ = lambda x, y: x.key == y.key
__ge__ = lambda x, y: x.key >= y.key
__gt__ = lambda x, y: x.key > y.key
__ne__ = lambda x, y: x.key != y.key
__slots__ = ("key", "data", "left", "right")
# each node has a key and data list
def __init__(self, key, data):
self.key = key
self.data = data if isinstance(data, list) else [data]
self.left = None
self.right = None
def replace(self, child, new_child):
"""
Replace this node's child with a new child.
"""
if self.left is not None and self.left == child:
self.left = new_child
elif self.right is not None and self.right == child:
self.right = new_child
else:
raise ValueError("Cannot call replace() on non-child")
def remove(self, child):
"""
Remove the given child.
"""
self.replace(child, None)
def set(self, other):
"""
Copy the given node.
"""
self.key = other.key
self.data = other.data[:]
def __str__(self):
return str((self.key, self.data))
def __repr__(self):
return str(self)
class BST:
"""
A basic binary search tree in pure Python, used
as an engine for indexing.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
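    Examples
    --------
    A minimal illustration (plain sequences are shown here instead of
    Table/Column objects for brevity)::
        bst = BST(data=[(1,), (2,), (2,)], row_index=[0, 1, 2])
        bst.find((2,))       # -> [1, 2]
        bst.sorted_data()    # -> [0, 1, 2]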
"""
NodeClass = Node
def __init__(self, data, row_index, unique=False):
self.root = None
self.size = 0
self.unique = unique
for key, row in zip(data, row_index):
self.add(tuple(key), row)
def add(self, key, data=None):
"""
Add a key, data pair.
"""
if data is None:
data = key
self.size += 1
node = self.NodeClass(key, data)
curr_node = self.root
if curr_node is None:
self.root = node
return
while True:
if node < curr_node:
if curr_node.left is None:
curr_node.left = node
break
curr_node = curr_node.left
elif node > curr_node:
if curr_node.right is None:
curr_node.right = node
break
curr_node = curr_node.right
elif self.unique:
raise ValueError("Cannot insert non-unique value")
else: # add data to node
curr_node.data.extend(node.data)
curr_node.data = sorted(curr_node.data)
return
def find(self, key):
"""
Return all data values corresponding to a given key.
Parameters
----------
key : tuple
Input key
Returns
-------
data_vals : list
List of rows corresponding to the input key
"""
node, parent = self.find_node(key)
return node.data if node is not None else []
def find_node(self, key):
"""
Find the node associated with the given key.
"""
if self.root is None:
return (None, None)
return self._find_recursive(key, self.root, None)
def shift_left(self, row):
"""
Decrement all rows larger than the given row.
"""
for node in self.traverse():
node.data = [x - 1 if x > row else x for x in node.data]
def shift_right(self, row):
"""
Increment all rows greater than or equal to the given row.
"""
for node in self.traverse():
node.data = [x + 1 if x >= row else x for x in node.data]
def _find_recursive(self, key, node, parent):
try:
if key == node.key:
return (node, parent)
elif key > node.key:
if node.right is None:
return (None, None)
return self._find_recursive(key, node.right, node)
else:
if node.left is None:
return (None, None)
return self._find_recursive(key, node.left, node)
except TypeError: # wrong key type
return (None, None)
def traverse(self, order="inorder"):
"""
Return nodes of the BST in the given order.
Parameters
----------
order : str
The order in which to recursively search the BST.
Possible values are:
"preorder": current node, left subtree, right subtree
"inorder": left subtree, current node, right subtree
"postorder": left subtree, right subtree, current node
"""
if order == "preorder":
return self._preorder(self.root, [])
elif order == "inorder":
return self._inorder(self.root, [])
elif order == "postorder":
return self._postorder(self.root, [])
raise ValueError(f'Invalid traversal method: "{order}"')
def items(self):
"""
Return BST items in order as (key, data) pairs.
"""
return [(x.key, x.data) for x in self.traverse()]
def sort(self):
"""
Make row order align with key order.
"""
i = 0
for node in self.traverse():
num_rows = len(node.data)
node.data = list(range(i, i + num_rows))
i += num_rows
def sorted_data(self):
"""
Return BST rows sorted by key values.
"""
return [x for node in self.traverse() for x in node.data]
def _preorder(self, node, lst):
if node is None:
return lst
lst.append(node)
self._preorder(node.left, lst)
self._preorder(node.right, lst)
return lst
def _inorder(self, node, lst):
if node is None:
return lst
self._inorder(node.left, lst)
lst.append(node)
self._inorder(node.right, lst)
return lst
def _postorder(self, node, lst):
if node is None:
return lst
self._postorder(node.left, lst)
self._postorder(node.right, lst)
lst.append(node)
return lst
def _substitute(self, node, parent, new_node):
if node is self.root:
self.root = new_node
else:
parent.replace(node, new_node)
def remove(self, key, data=None):
"""
Remove data corresponding to the given key.
Parameters
----------
key : tuple
The key to remove
data : int or None
If None, remove the node corresponding to the given key.
If not None, remove only the given data value from the node.
Returns
-------
successful : bool
            True if removal was successful, False otherwise
"""
node, parent = self.find_node(key)
if node is None:
return False
if data is not None:
if data not in node.data:
raise ValueError("Data does not belong to correct node")
elif len(node.data) > 1:
node.data.remove(data)
return True
if node.left is None and node.right is None:
self._substitute(node, parent, None)
elif node.left is None and node.right is not None:
self._substitute(node, parent, node.right)
elif node.right is None and node.left is not None:
self._substitute(node, parent, node.left)
else:
# find largest element of left subtree
curr_node = node.left
parent = node
while curr_node.right is not None:
parent = curr_node
curr_node = curr_node.right
self._substitute(curr_node, parent, curr_node.left)
node.set(curr_node)
self.size -= 1
return True
def is_valid(self):
"""
Returns whether this is a valid BST.
"""
return self._is_valid(self.root)
def _is_valid(self, node):
if node is None:
return True
return (
(node.left is None or node.left <= node)
and (node.right is None or node.right >= node)
and self._is_valid(node.left)
and self._is_valid(node.right)
)
def range(self, lower, upper, bounds=(True, True)):
"""
Return all nodes with keys in the given range.
Parameters
----------
lower : tuple
Lower bound
upper : tuple
Upper bound
bounds : (2,) tuple of bool
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument corresponds to an inclusive lower bound,
and the second argument to an inclusive upper bound.
"""
nodes = self.range_nodes(lower, upper, bounds)
return [x for node in nodes for x in node.data]
def range_nodes(self, lower, upper, bounds=(True, True)):
"""
Return nodes in the given range.
"""
if self.root is None:
return []
# op1 is <= or <, op2 is >= or >
op1 = operator.le if bounds[0] else operator.lt
op2 = operator.ge if bounds[1] else operator.gt
return self._range(lower, upper, op1, op2, self.root, [])
def same_prefix(self, val):
"""
Assuming the given value has smaller length than keys, return
nodes whose keys have this value as a prefix.
"""
if self.root is None:
return []
nodes = self._same_prefix(val, self.root, [])
return [x for node in nodes for x in node.data]
def _range(self, lower, upper, op1, op2, node, lst):
if op1(lower, node.key) and op2(upper, node.key):
lst.append(node)
if upper > node.key and node.right is not None:
self._range(lower, upper, op1, op2, node.right, lst)
if lower < node.key and node.left is not None:
self._range(lower, upper, op1, op2, node.left, lst)
return lst
def _same_prefix(self, val, node, lst):
prefix = node.key[: len(val)]
if prefix == val:
lst.append(node)
if prefix <= val and node.right is not None:
self._same_prefix(val, node.right, lst)
if prefix >= val and node.left is not None:
self._same_prefix(val, node.left, lst)
return lst
def __repr__(self):
return f"<{self.__class__.__name__}>"
def _print(self, node, level):
line = "\t" * level + str(node) + "\n"
if node.left is not None:
line += self._print(node.left, level + 1)
if node.right is not None:
line += self._print(node.right, level + 1)
return line
@property
def height(self):
"""
Return the BST height.
"""
return self._height(self.root)
def _height(self, node):
if node is None:
return -1
return max(self._height(node.left), self._height(node.right)) + 1
def replace_rows(self, row_map):
"""
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their nodes deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
"""
for key, data in self.items():
data[:] = [row_map[x] for x in data if x in row_map]
|
803e2b03a3ef7d53638c74087331e08bc5df8b7dd8ec98c9cf711aca338834c5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
from copy import deepcopy
from importlib import import_module
import numpy as np
from astropy.units.quantity import QuantityInfo
from astropy.utils.data_info import MixinInfo
from .column import Column, MaskedColumn
from .table import QTable, Table, has_info_class
# TODO: some of this might be better done programmatically, through
# code like
# __construct_mixin_classes += tuple(
# f'astropy.coordinates.representation.{cls.__name__}'
# for cls in (list(coorep.REPRESENTATION_CLASSES.values())
# + list(coorep.DIFFERENTIAL_CLASSES.values()))
# if cls.__name__ in coorep.__all__)
# However, to avoid very hard to track import issues, the definition
# should then be done at the point where it is actually needed,
# using local imports. See also
# https://github.com/astropy/astropy/pull/10210#discussion_r419087286
__construct_mixin_classes = (
"astropy.time.core.Time",
"astropy.time.core.TimeDelta",
"astropy.units.quantity.Quantity",
"astropy.units.function.logarithmic.Magnitude",
"astropy.units.function.logarithmic.Decibel",
"astropy.units.function.logarithmic.Dex",
"astropy.coordinates.angles.Latitude",
"astropy.coordinates.angles.Longitude",
"astropy.coordinates.angles.Angle",
"astropy.coordinates.distances.Distance",
"astropy.coordinates.earth.EarthLocation",
"astropy.coordinates.sky_coordinate.SkyCoord",
"astropy.coordinates.polarization.StokesCoord",
"astropy.table.ndarray_mixin.NdarrayMixin",
"astropy.table.table_helpers.ArrayWrapper",
"astropy.table.column.Column",
"astropy.table.column.MaskedColumn",
"astropy.utils.masked.core.MaskedNDArray",
# Representations
"astropy.coordinates.representation.cartesian.CartesianRepresentation",
"astropy.coordinates.representation.spherical.UnitSphericalRepresentation",
"astropy.coordinates.representation.spherical.RadialRepresentation",
"astropy.coordinates.representation.spherical.SphericalRepresentation",
"astropy.coordinates.representation.spherical.PhysicsSphericalRepresentation",
"astropy.coordinates.representation.cylindrical.CylindricalRepresentation",
"astropy.coordinates.representation.cartesian.CartesianDifferential",
"astropy.coordinates.representation.spherical.UnitSphericalDifferential",
"astropy.coordinates.representation.spherical.SphericalDifferential",
"astropy.coordinates.representation.spherical.UnitSphericalCosLatDifferential",
"astropy.coordinates.representation.spherical.SphericalCosLatDifferential",
"astropy.coordinates.representation.spherical.RadialDifferential",
"astropy.coordinates.representation.spherical.PhysicsSphericalDifferential",
"astropy.coordinates.representation.cylindrical.CylindricalDifferential",
# Deprecated paths
"astropy.coordinates.representation.CartesianRepresentation",
"astropy.coordinates.representation.UnitSphericalRepresentation",
"astropy.coordinates.representation.RadialRepresentation",
"astropy.coordinates.representation.SphericalRepresentation",
"astropy.coordinates.representation.PhysicsSphericalRepresentation",
"astropy.coordinates.representation.CylindricalRepresentation",
"astropy.coordinates.representation.CartesianDifferential",
"astropy.coordinates.representation.UnitSphericalDifferential",
"astropy.coordinates.representation.SphericalDifferential",
"astropy.coordinates.representation.UnitSphericalCosLatDifferential",
"astropy.coordinates.representation.SphericalCosLatDifferential",
"astropy.coordinates.representation.RadialDifferential",
"astropy.coordinates.representation.PhysicsSphericalDifferential",
"astropy.coordinates.representation.CylindricalDifferential",
)
class SerializedColumnInfo(MixinInfo):
"""
Minimal info to allow SerializedColumn to be recognized as a mixin Column.
Used to help create a dict of columns in ColumnInfo for structured data.
"""
def _represent_as_dict(self):
# SerializedColumn is already a `dict`, so we can return it directly.
return self._parent
class SerializedColumn(dict):
"""Subclass of dict used to serialize mixin columns.
    It is used in the representation to contain the name and possibly
    other info for a mixin column or attribute (either primary data or an
    array-like attribute) that is serialized as a column in the table.
"""
info = SerializedColumnInfo()
@property
def shape(self):
"""Minimal shape implementation to allow use as a mixin column.
Returns the shape of the first item that has a shape at all,
or ``()`` if none of the values has a shape attribute.
"""
return next(
(value.shape for value in self.values() if hasattr(value, "shape")), ()
)
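# Illustrative sketch only (not part of the implementation): after
# serialization, a Time column named 't' is typically described by a nested
# structure roughly like
#
#     SerializedColumn({'jd1': SerializedColumn({'name': 't.jd1'}),
#                       'jd2': SerializedColumn({'name': 't.jd2'}),
#                       ...})
#
# where the inner entries link back to plain columns written to the table.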
def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()):
"""Carry out processing needed to serialize ``col`` in an output table
consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This
    relies on the object to determine if any transformation is required and may
depend on the ``serialize_method`` and ``serialize_context`` context
variables. For instance a ``MaskedColumn`` may be stored directly to
FITS, but can also be serialized as separate data and mask columns.
This function builds up a list of plain columns in the ``new_cols`` arg (which
is passed as a persistent list). This includes both plain columns from the
original table and plain columns that represent data from serialized columns
(e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column).
For serialized columns the ``mixin_cols`` dict is updated with required
attributes and information to subsequently reconstruct the table.
Table mixin columns are always serialized and get represented by one
or more data columns. In earlier versions of the code *only* mixin
columns were serialized, hence the use within this code of "mixin"
to imply serialization. Starting with version 3.1, the non-mixin
``MaskedColumn`` can also be serialized.
"""
obj_attrs = col.info._represent_as_dict()
# If serialization is not required (see function docstring above)
# or explicitly specified as excluded, then treat as a normal column.
if not obj_attrs or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial in (
("unit", lambda x: x is not None and x != ""),
("format", lambda x: x is not None),
("description", lambda x: x is not None),
("meta", lambda x: x),
):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
# Find column attributes that have the same length as the column itself.
# These will be stored in the table as new columns (aka "data attributes").
# Examples include SkyCoord.ra (what is typically considered the data and is
    # always an array) and SkyCoord.obstime (which can be a scalar or an
# array).
data_attrs = [
key
for key, value in obj_attrs.items()
if getattr(value, "shape", ())[:1] == col.shape[:1]
]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
# New column name combines the old name and attribute
        # (e.g. skycoord.ra, skycoord.dec), unless it is the primary data
# attribute for the column (e.g. value for Quantity or data for
# MaskedColumn). For primary data, we attempt to store any info on
# the format, etc., on the column, but not for ancillary data (e.g.,
# no sense to use a float format for a mask).
is_primary = data_attr == col.info._represent_as_dict_primary_data
if is_primary:
new_name = name
new_info = info
else:
new_name = name + "." + data_attr
new_info = {}
if not has_info_class(data, MixinInfo):
col_cls = (
MaskedColumn
if (hasattr(data, "mask") and np.any(data.mask))
else Column
)
data = col_cls(data, name=new_name, **new_info)
if is_primary:
# Don't store info in the __serialized_columns__ dict for this column
# since this is redundant with info stored on the new column.
info = {}
# Recurse. If this is anything that needs further serialization (i.e.,
# a Mixin column, a structured Column, a MaskedColumn for which mask is
# stored, etc.), it will define obj_attrs[new_name]. Otherwise, it will
# just add to new_cols and all we have to do is to link to the new name.
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(
obj_attrs.pop(new_name, {"name": new_name})
)
# Strip out from info any attributes defined by the parent,
# and store whatever remains.
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs["__info__"] = info
# Store the fully qualified class name
if not isinstance(col, SerializedColumn):
obj_attrs.setdefault("__class__", col.__module__ + "." + col.__class__.__name__)
mixin_cols[name] = obj_attrs
def represent_mixins_as_columns(tbl, exclude_classes=()):
"""Represent input Table ``tbl`` using only `~astropy.table.Column`
or `~astropy.table.MaskedColumn` objects.
This function represents any mixin columns like `~astropy.time.Time` in
``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns
a new Table. A single mixin column may be split into multiple column
components as needed for fully representing the column. This includes the
possibility of recursive splitting, as shown in the example below. The
new column names are formed as ``<column_name>.<component>``, e.g.
``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``.
In addition to splitting columns, this function updates the table ``meta``
dictionary to include a dict named ``__serialized_columns__`` which provides
additional information needed to construct the original mixin columns from
the split columns.
This function is used by astropy I/O when writing tables to ECSV, FITS,
HDF5 formats.
Note that if the table does not include any mixin columns then the original
table is returned with no update to ``meta``.
Parameters
----------
tbl : `~astropy.table.Table` or subclass
Table to represent mixins as Columns
exclude_classes : tuple of class
        Exclude any mixin columns which are instances of any classes in the tuple
Returns
-------
tbl : `~astropy.table.Table`
New Table with updated columns, or else the original input ``tbl``
Examples
--------
>>> from astropy.table import Table, represent_mixins_as_columns
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord
>>> x = [100.0, 200.0]
>>> obstime = Time([1999.0, 2000.0], format='jyear')
>>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime)
>>> tbl = Table([sc, x], names=['sc', 'x'])
>>> represent_mixins_as_columns(tbl)
<Table length=2>
sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x
deg deg
float64 float64 float64 float64 float64
------- ------- -------------- -------------- -------
1.0 3.0 2451180.0 -0.25 100.0
2.0 4.0 2451545.0 0.0 200.0
"""
# Dict of metadata for serializing each column, keyed by column name.
# Gets filled in place by _represent_mixin_as_column().
mixin_cols = {}
# List of columns for the output table. For plain Column objects
# this will just be the original column object.
new_cols = []
# Go through table columns and represent each column as one or more
# plain Column objects (in new_cols) + metadata (in mixin_cols).
for col in tbl.itercols():
_represent_mixin_as_column(
col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes
)
# If no metadata was created then just return the original table.
if mixin_cols:
meta = deepcopy(tbl.meta)
meta["__serialized_columns__"] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
else:
out = tbl
for col in out.itercols():
if not isinstance(col, Column) and col.__class__ not in exclude_classes:
# This catches columns for which info has not been set up right and
# therefore were not converted. See the corresponding test in
# test_mixin.py for an example.
raise TypeError(
"failed to represent column "
f"{col.info.name!r} ({col.__class__.__name__}) as one "
"or more Column subclasses. This looks like a mixin class "
"that does not have the correct _represent_as_dict() method "
"in the class `info` attribute."
)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
cls_full_name = obj_attrs.pop("__class__", None)
if cls_full_name is None:
# We're dealing with a SerializedColumn holding columns, stored in
# obj_attrs. For this case, info holds the name (and nothing else).
mixin = SerializedColumn(obj_attrs)
mixin.info.name = info["name"]
return mixin
if cls_full_name not in __construct_mixin_classes:
raise ValueError(f"unsupported class for construct {cls_full_name}")
mod_name, _, cls_name = cls_full_name.rpartition(".")
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
class _TableLite(OrderedDict):
"""
Minimal table-like object for _construct_mixin_from_columns. This allows
manipulating the object like a Table but without the actual overhead
for a full Table.
    More pressingly, there is an issue with constructing MaskedColumn, where the
    encoded Column components (data, mask) are turned into a MaskedColumn.
    When this happens in a real table, all other columns are immediately
    masked and a warning is issued. This is not desirable.
"""
def add_column(self, col, index=0):
colnames = self.colnames
self[col.info.name] = col
for ii, name in enumerate(colnames):
if ii >= index:
self.move_to_end(name)
@property
def colnames(self):
return list(self.keys())
def itercols(self):
return self.values()
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
# A SerializedColumn can just link to a serialized column using a name
# (e.g., time.jd1), or itself be a mixin (e.g., coord.obstime). Note
            # that in principle a mixin could include a column called 'name',
# hence we check whether the value is actually a string (see gh-13232).
if "name" in val and isinstance(val["name"], str):
data_attrs_map[val["name"]] = name
else:
out_name = f"{new_name}.{name}"
_construct_mixin_from_columns(out_name, val, out)
data_attrs_map[out_name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# The order of data_attrs_map may not match the actual order, as it is set
# by the yaml description. So, sort names by position in the serialized table.
# Keep the index of the first column, so we can insert the new one there later.
names = sorted(data_attrs_map, key=out.colnames.index)
idx = out.colnames.index(names[0])
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name in names:
obj_attrs[data_attrs_map[name]] = out[name]
del out[name]
info = obj_attrs.pop("__info__", {})
if len(names) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column. First step is to get that first column which
# has been moved from `out` to `obj_attrs` above.
col = obj_attrs[data_attrs_map[name]]
# Now copy the relevant attributes
for attr, nontrivial in (
("unit", lambda x: x not in (None, "")),
("format", lambda x: x is not None),
("description", lambda x: x is not None),
("meta", lambda x: x),
):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info["name"] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if "__serialized_columns__" not in tbl.meta:
return tbl
meta = tbl.meta.copy()
mixin_cols = meta.pop("__serialized_columns__")
out = _TableLite(tbl.columns)
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo) for col in out.itercols())
out_cls = QTable if has_quantities else Table
return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
|
013e011beb275a1e3580d0ceabd928bfba824149cc1730177f37de938fa0a104 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
import string
from itertools import cycle
import numpy as np
from astropy.utils.data_info import ParentDtypeInfo
from .table import Column, Table
class TimingTables:
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table["i"] = np.arange(size)
self.table["a"] = np.random.random(size) # float
self.table["b"] = np.random.random(size) > 0.5 # bool
self.table["c"] = np.random.random((size, 10)) # 2d column
self.table["d"] = np.random.choice(np.array(list(string.ascii_letters)), size)
self.extra_row = {"a": 1.2, "b": True, "c": np.repeat(1, 10), "d": "Z"}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table["a"] > 0.9)[0]
self.table_grouped = self.table.group_by("d")
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table["i"] = np.arange(1, size, 3)
self.other_table["f"] = np.random.random()
self.other_table.sort("f")
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2["g"] = np.random.random(size)
self.other_table_2["h"] = np.random.random((size, 10))
self.bool_mask = self.table["a"] > 0.6
def simple_table(size=3, cols=None, kinds="ifS", masked=False):
"""
Return a simple table for testing.
    Examples
--------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, optional
Number of table columns. Defaults to number of kinds.
kinds : str
String consisting of the column dtype.kinds. This string
will be cycled through to generate the column dtype.
The allowed values are 'i', 'f', 'S', 'O'.
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
raise ValueError("Max 26 columns in SimpleTable")
columns = []
names = [chr(ord("a") + ii) for ii in range(cols)]
letters = np.array(list(string.ascii_letters))
for jj, kind in zip(range(cols), cycle(kinds)):
if kind == "i":
data = np.arange(1, size + 1, dtype=np.int64) + jj
elif kind == "f":
data = np.arange(size, dtype=np.float64) + jj
elif kind == "S":
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == "O":
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError("Unknown data kind")
columns.append(Column(data))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
import warnings
from astropy.io.votable.table import parse
from astropy.utils.data import get_pkg_data_filename
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(
get_pkg_data_filename("../io/votable/tests/data/regression.xml"),
pedantic=False,
)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
class ArrayWrapperInfo(ParentDtypeInfo):
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
"""Represent Column as a dict that can be serialized."""
col = self._parent
out = {"data": col.data}
return out
def _construct_from_dict(self, map):
"""Construct Column from ``map``."""
data = map.pop("data")
out = self._parent_cls(data, **map)
return out
class ArrayWrapper:
"""
Minimal mixin using a simple wrapper around a numpy array.
TODO: think about the future of this class as it is mostly for demonstration
purposes (of the mixin protocol). Consider taking it out of core and putting
it into a tutorial. One advantage of having this in core is that it is
getting tested in the mixin testing though it doesn't work for multidim
data.
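    Examples
    --------
    Illustrative only (assumes the public Table API)::
        from astropy.table import Table
        from astropy.table.table_helpers import ArrayWrapper
        aw = ArrayWrapper([1, 2, 3])
        t = Table([aw], names=['aw'])
        t['aw'][1]   # -> 2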
"""
info = ArrayWrapperInfo()
def __init__(self, data, copy=True):
self.data = np.array(data, copy=copy)
if "info" in getattr(data, "__dict__", ()):
self.info = data.info
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
out = self.data[item]
else:
out = self.__class__(self.data[item], copy=False)
if "info" in self.__dict__:
out.info = self.info
return out
def __setitem__(self, item, value):
self.data[item] = value
def __len__(self):
return len(self.data)
def __eq__(self, other):
"""Minimal equality testing, mostly for mixin unit tests."""
if isinstance(other, ArrayWrapper):
return self.data == other.data
else:
return self.data == other
@property
def dtype(self):
return self.data.dtype
@property
def shape(self):
return self.data.shape
def __repr__(self):
return f"<{self.__class__.__name__} name='{self.info.name}' data={self.data}>"
|
90d417f5311f7cdfe647dea11b504d8cb7944b71cff945e645a9a5234c2017e8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from astropy.modeling import models
from astropy.modeling.core import Fittable1DModel, Fittable2DModel
from .core import Kernel, Kernel1D, Kernel2D
from .utils import KernelSizeError, has_even_axis
__all__ = [
"Gaussian1DKernel",
"Gaussian2DKernel",
"CustomKernel",
"Box1DKernel",
"Box2DKernel",
"Tophat2DKernel",
"Trapezoid1DKernel",
"RickerWavelet1DKernel",
"RickerWavelet2DKernel",
"AiryDisk2DKernel",
"Moffat2DKernel",
"Model1DKernel",
"Model2DKernel",
"TrapezoidDisk2DKernel",
"Ring2DKernel",
]
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
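# For example, _round_up_to_odd_integer(16) == 17 and
# _round_up_to_odd_integer(15.2) == 17, so default kernel sizes are always odd.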
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : int, optional
Size of the kernel array. Default = ⌊8*stddev+1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1.0 / (np.sqrt(2 * np.pi) * stddev), 0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self.normalize()
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float or `~astropy.units.Quantity` ['angle']
Rotation angle. If passed as a float, it is assumed to be in radians.
The rotation angle increases counterclockwise.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*stddev + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*stddev + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(
amplitude=1.0 / (2 * np.pi * x_stddev * y_stddev),
x_mean=0,
y_mean=0,
x_stddev=x_stddev,
y_stddev=y_stddev,
theta=theta,
)
self._default_size = _round_up_to_odd_integer(8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self.normalize()
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
    weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
    smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1.0 / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs["mode"] = "linear_interp"
super().__init__(**kwargs)
self.normalize()
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1.0 / width**2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs["mode"] = "linear_interp"
super().__init__(**kwargs)
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1.0 / (np.pi * radius**2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self.normalize()
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(
1.0 / (np.pi * (radius_out**2 - radius_in**2)), 0, 0, radius_in, width
)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self.normalize()
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1.0, **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2.0 / slope)
super().__init__(**kwargs)
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : number
Radius of the filter kernel, defined as the radius of the constant part
before the kernel begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1.0, **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2.0 / slope)
super().__init__(**kwargs)
self.normalize()
class RickerWavelet1DKernel(Kernel1D):
"""
1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
or constant structures (e.g. background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet1DKernel
ricker_1d_kernel = RickerWavelet1DKernel(10)
plt.plot(ricker_1d_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width**3)
self._model = models.RickerWavelet1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
class RickerWavelet2DKernel(Kernel2D):
"""
2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
or constant structures (e.g. background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
ricker_2d_kernel = RickerWavelet2DKernel(10)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width**4)
self._model = models.RickerWavelet2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
This kernel is a typical model for a seeing limited PSF.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊4*fwhm + 1⌋,
where fwhm is the full width at half maximum of the Moffat model.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊4*fwhm + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
# Compute amplitude, from
# https://en.wikipedia.org/wiki/Moffat_distribution
amplitude = (alpha - 1.0) / (np.pi * gamma * gamma)
self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha)
self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm)
super().__init__(**kwargs)
self.normalize()
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See Also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See Also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
This kernel can now be used like a usual astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
self._separable = False
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
`~astropy.convolution.KernelSizeError`
If array size is even.
See Also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter.
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
if has_even_axis(self):
raise KernelSizeError("Kernel size must be odd in all axes.")
# Check if array is bool
ones = self._array == 1.0
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
|
6dafe0e2d2907b3369dee2efa26a3dcf2aae4861239242a5a33e464ae866f035 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the convolution and filter functionalities of astropy.
A few conceptual notes:
A filter kernel is mainly characterized by its response function. In the 1D
case we speak of "impulse response function", in the 2D case we call it "point
spread function". This response function is given for every kernel by an
astropy `FittableModel`, which is evaluated on a grid to obtain a filter array,
which can then be applied to binned data.
The model is centered on the array and should have an amplitude such that the array
integrates to one by default.
Currently only symmetric 2D kernels are supported.
"""
import copy
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .utils import (
KernelArithmeticError,
add_kernel_arrays_1D,
add_kernel_arrays_2D,
discretize_model,
)
MAX_NORMALIZATION = 100
__all__ = ["Kernel", "Kernel1D", "Kernel2D", "kernel_arithmetics"]
class Kernel:
"""
Convolution kernel base class.
Parameters
----------
array : ndarray
Kernel array.
"""
_separable = False
_is_bool = True
_model = None
def __init__(self, array):
self._array = np.asanyarray(array)
@property
def truncation(self):
"""
Absolute deviation of the sum of the kernel array values from
one.
"""
return np.abs(1.0 - self._array.sum())
@property
def is_bool(self):
"""
Indicates if kernel is bool.
If the kernel is boolean, the multiplication in the convolution can
be omitted to increase performance.
"""
return self._is_bool
@property
def model(self):
"""
Kernel response model.
"""
return self._model
@property
def dimension(self):
"""
Kernel dimension.
"""
return self.array.ndim
@property
def center(self):
"""
Index of the kernel center.
"""
return [axes_size // 2 for axes_size in self._array.shape]
def normalize(self, mode="integral"):
"""
Normalize the filter kernel.
Parameters
----------
mode : {'integral', 'peak'}
One of the following modes:
* 'integral' (default)
Kernel is normalized such that its integral = 1.
* 'peak'
Kernel is normalized such that its peak = 1.
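Examples
--------
A minimal sketch (the kernel width is illustrative)::
from astropy.convolution import Box1DKernel
kernel = Box1DKernel(5)
kernel.normalize(mode='peak')      # largest array value becomes 1
kernel.normalize(mode='integral')  # array sums to 1 again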
"""
if mode == "integral":
normalization = self._array.sum()
elif mode == "peak":
normalization = self._array.max()
else:
raise ValueError("invalid mode, must be 'integral' or 'peak'")
# Warn the user for kernels that sum to zero
if normalization == 0:
warnings.warn(
"The kernel cannot be normalized because it sums to zero.",
AstropyUserWarning,
)
else:
np.divide(self._array, normalization, self._array)
self._kernel_sum = self._array.sum()
@property
def shape(self):
"""
Shape of the kernel array.
"""
return self._array.shape
@property
def separable(self):
"""
Indicates if the filter kernel is separable.
A 2D filter is separable when its filter array can be written as the
outer product of two 1D arrays.
If a filter kernel is separable, higher-dimensional convolutions can be
performed by applying the 1D filter array consecutively along every dimension,
which is significantly faster than using the full-dimensional filter array.
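For example, a minimal, illustrative sketch::
import numpy as np
from astropy.convolution import Box1DKernel, Box2DKernel
b1 = Box1DKernel(3).array
b2 = Box2DKernel(3).array
np.allclose(b2, np.outer(b1, b1))  # True: the 2D Box kernel is separable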
"""
return self._separable
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
def __add__(self, kernel):
"""
Add two filter kernels.
"""
return kernel_arithmetics(self, kernel, "add")
def __sub__(self, kernel):
"""
Subtract two filter kernels.
"""
return kernel_arithmetics(self, kernel, "sub")
def __mul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __rmul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __array__(self):
"""
Array representation of the kernel.
"""
return self._array
def __array_wrap__(self, array, context=None):
"""
Wrapper for multiplication with numpy arrays.
"""
if type(context[0]) == np.ufunc:
return NotImplemented
else:
return array
class Kernel1D(Kernel):
"""
Base class for 1D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int or None, optional
Size of the kernel array. Default = ⌊8*width+1⌋.
Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array.
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
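# For example, x_size=5 (odd) gives x_range=(-2, 3), i.e. pixel centers
# -2 .. 2, while x_size=4 (even) gives x_range=(-1.5, 2.5).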
array = discretize_model(self._model, x_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
class Kernel2D(Kernel):
"""
Base class for 2D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width + 1⌋.
Only used if ``array`` is None.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width + 1⌋.
Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array. Default is None.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
width : number
Width of the filter kernel.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
if y_size is None:
y_size = x_size
elif y_size != int(y_size):
raise TypeError("y_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
if y_size % 2 == 0: # even kernel
y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5)
else: # odd kernel
y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1)
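# For example, x_size = y_size = 5 gives x_range = y_range = (-2, 3),
# i.e. a 5x5 grid of pixel centers from -2 to 2 in each direction.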
array = discretize_model(self._model, x_range, y_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
def kernel_arithmetics(kernel, value, operation):
"""
Add, subtract or multiply two kernels.
Parameters
----------
kernel : `astropy.convolution.Kernel`
Kernel instance.
value : `astropy.convolution.Kernel`, float, or int
Value to operate with.
operation : {'add', 'sub', 'mul'}
One of the following operations:
* 'add'
Add two kernels
* 'sub'
Subtract two kernels
* 'mul'
Multiply kernel with number or convolve two kernels.
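Examples
--------
A minimal sketch (kernel widths are illustrative)::
from astropy.convolution import Gaussian1DKernel
from astropy.convolution.core import kernel_arithmetics
narrow = Gaussian1DKernel(2)
wide = Gaussian1DKernel(4)
dog = kernel_arithmetics(narrow, wide, 'sub')     # difference of Gaussians
tripled = kernel_arithmetics(narrow, 3.0, 'mul')  # scale by a number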
"""
# 1D kernels
if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D):
if operation == "add":
new_array = add_kernel_arrays_1D(kernel.array, value.array)
elif operation == "sub":
new_array = add_kernel_arrays_1D(kernel.array, -value.array)
elif operation == "mul":
raise KernelArithmeticError(
"Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead."
)
new_kernel = Kernel1D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# 2D kernels
elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D):
if operation == "add":
new_array = add_kernel_arrays_2D(kernel.array, value.array)
elif operation == "sub":
new_array = add_kernel_arrays_2D(kernel.array, -value.array)
elif operation == "mul":
raise KernelArithmeticError(
"Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead."
)
new_kernel = Kernel2D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# kernel and number
elif isinstance(kernel, (Kernel1D, Kernel2D)) and np.isscalar(value):
if operation != "mul":
raise KernelArithmeticError("Kernel operation not supported.")
new_kernel = copy.copy(kernel)
new_kernel._array *= value
else:
raise KernelArithmeticError("Kernel operation not supported.")
return new_kernel
|
7782de68d4c25dbf4921d0233d0fd893793905bff864bbc6bc8e6f4a807f3471 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.modeling.core import Model, custom_model
__all__ = [
"discretize_model",
"KernelError",
"KernelSizeError",
"KernelArithmeticError",
]
class KernelError(Exception):
"""
Base error class for kernel errors.
"""
class KernelSizeError(KernelError):
"""
Raised when the kernel size is even in any axis.
"""
class KernelArithmeticError(KernelError):
"""Called when doing invalid arithmetic with a kernel."""
def has_even_axis(array):
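"""
Return True if ``array`` (a list or tuple, checked by its length, or any
object with a ``shape``) has an even size along any axis.
"""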
if isinstance(array, (list, tuple)):
return not len(array) % 2
else:
return any(not axes_size % 2 for axes_size in array.shape)
def add_kernel_arrays_1D(array_1, array_2):
"""
Add two 1D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
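For example, a minimal sketch::
import numpy as np
add_kernel_arrays_1D(np.array([0., 0., 1., 0., 0.]), np.array([1., 2., 1.]))
# -> array([0., 1., 3., 1., 0.])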
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = array_1.size // 2
slice_ = slice(center - array_2.size // 2, center + array_2.size // 2 + 1)
new_array[slice_] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = array_2.size // 2
slice_ = slice(center - array_1.size // 2, center + array_1.size // 2 + 1)
new_array[slice_] += array_1
return new_array
return array_2 + array_1
def add_kernel_arrays_2D(array_1, array_2):
"""
Add two 2D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
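For example, a minimal sketch::
import numpy as np
add_kernel_arrays_2D(np.zeros((5, 5)), np.ones((3, 3)))
# -> a 5x5 array with a centered 3x3 block of ones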
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = [axes_size // 2 for axes_size in array_1.shape]
slice_x = slice(
center[1] - array_2.shape[1] // 2, center[1] + array_2.shape[1] // 2 + 1
)
slice_y = slice(
center[0] - array_2.shape[0] // 2, center[0] + array_2.shape[0] // 2 + 1
)
new_array[slice_y, slice_x] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = [axes_size // 2 for axes_size in array_2.shape]
slice_x = slice(
center[1] - array_1.shape[1] // 2, center[1] + array_1.shape[1] // 2 + 1
)
slice_y = slice(
center[0] - array_1.shape[0] // 2, center[0] + array_1.shape[0] // 2 + 1
)
new_array[slice_y, slice_x] += array_1
return new_array
return array_2 + array_1
def discretize_model(model, x_range, y_range=None, mode="center", factor=10):
"""
Evaluate an analytical model function on a pixel grid.
Parameters
----------
model : `~astropy.modeling.Model` or callable.
Analytical model function to be discretized. A callable that is
not a `~astropy.modeling.Model` instance is converted to a model
using `~astropy.modeling.custom_model`.
x_range : 2-tuple
Lower and upper bounds of x pixel values at which the model is
evaluated. The upper bound is non-inclusive. An ``x_range`` of
``(0, 3)`` means the model will be evaluated at x pixels 0, 1,
and 2. The difference between the upper and lower bound must be
a whole number so that the output array size is well defined.
y_range : 2-tuple or `None`, optional
Lower and upper bounds of y pixel values at which the model is
evaluated. The upper bound is non-inclusive. A ``y_range`` of
``(0, 3)`` means the model will be evaluated at y pixels of 0,
1, and 2. The difference between the upper and lower bound must
be a whole number so that the output array size is well defined.
``y_range`` is necessary only for 2D models.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following modes:
* ``'center'`` (default)
Discretize model by taking the value at the center of
the pixel bins.
* ``'linear_interp'``
Discretize model by linearly interpolating between the
values at the edges (1D) or corners (2D) of the pixel
bins. For 2D models, the interpolation is bilinear.
* ``'oversample'``
Discretize model by taking the average of model values
in the pixel bins on an oversampled grid. Use the
``factor`` keyword to set the integer oversampling
factor.
* ``'integrate'``
Discretize model by integrating the model over the pixel
bins using `scipy.integrate.quad`. This mode conserves
the model integral on a subpixel scale, but is very
slow.
factor : int, optional
The integer oversampling factor used when ``mode='oversample'``.
Ignored otherwise.
Returns
-------
array : `numpy.ndarray`
The discretized model array.
Examples
--------
In this example, we define a
`~astropy.modeling.functional_models.Gaussian1D` model that has been
normalized so that it sums to 1.0. We then discretize this model
using the ``'center'``, ``'linear_interp'``, and ``'oversample'``
(with ``factor=10``) modes.
.. plot::
:show-source-link:
import matplotlib.pyplot as plt
import numpy as np
from astropy.convolution.utils import discretize_model
from astropy.modeling.models import Gaussian1D
gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
x_range = (-2, 3)
x = np.arange(*x_range)
y_center = discretize_model(gauss_1D, x_range, mode='center')
y_edge = discretize_model(gauss_1D, x_range, mode='linear_interp')
y_oversample = discretize_model(gauss_1D, x_range, mode='oversample')
fig, ax = plt.subplots(figsize=(8, 6))
label = f'center (sum={y_center.sum():.3f})'
ax.plot(x, y_center, '.-', label=label)
label = f'linear_interp (sum={y_edge.sum():.3f})'
ax.plot(x, y_edge, '.-', label=label)
label = f'oversample (sum={y_oversample.sum():.3f})'
ax.plot(x, y_oversample, '.-', label=label)
ax.set_xlabel('x')
ax.set_ylabel('Value')
plt.legend()
"""
if not callable(model):
raise TypeError("Model must be callable.")
if not isinstance(model, Model):
model = custom_model(model)()
ndim = model.n_inputs
if ndim > 2:
raise ValueError("discretize_model supports only 1D and 2D models.")
dxrange = np.diff(x_range)[0]
if dxrange != int(dxrange):
raise ValueError(
"The difference between the upper and lower limit of"
" 'x_range' must be a whole number."
)
if y_range:
dyrange = np.diff(y_range)[0]
if dyrange != int(dyrange):
raise ValueError(
"The difference between the upper and lower limit of"
" 'y_range' must be a whole number."
)
if factor != int(factor):
raise ValueError("factor must have an integer value")
factor = int(factor)
if ndim == 2 and y_range is None:
raise ValueError("y_range must be specified for a 2D model")
if ndim == 1 and y_range is not None:
raise ValueError("y_range should not be input for a 1D model")
if mode == "center":
if ndim == 1:
return discretize_center_1D(model, x_range)
elif ndim == 2:
return discretize_center_2D(model, x_range, y_range)
elif mode == "linear_interp":
if ndim == 1:
return discretize_linear_1D(model, x_range)
if ndim == 2:
return discretize_bilinear_2D(model, x_range, y_range)
elif mode == "oversample":
if ndim == 1:
return discretize_oversample_1D(model, x_range, factor)
if ndim == 2:
return discretize_oversample_2D(model, x_range, y_range, factor)
elif mode == "integrate":
if ndim == 1:
return discretize_integrate_1D(model, x_range)
if ndim == 2:
return discretize_integrate_2D(model, x_range, y_range)
else:
raise ValueError("Invalid mode for discretize_model.")
def discretize_center_1D(model, x_range):
"""
Discretize model by taking the value at the center of the bin.
"""
x = np.arange(*x_range)
return model(x)
def discretize_center_2D(model, x_range, y_range):
"""
Discretize model by taking the value at the center of the pixel.
"""
x = np.arange(*x_range)
y = np.arange(*y_range)
x, y = np.meshgrid(x, y)
return model(x, y)
def discretize_linear_1D(model, x_range):
"""
Discretize model by performing a linear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values_intermediate_grid = model(x)
return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])
def discretize_bilinear_2D(model, x_range, y_range):
"""
Discretize model by performing a bilinear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
x, y = np.meshgrid(x, y)
values_intermediate_grid = model(x, y)
# Mean in y direction
values = 0.5 * (values_intermediate_grid[1:, :] + values_intermediate_grid[:-1, :])
# Mean in x direction
values = 0.5 * (values[:, 1:] + values[:, :-1])
return values
def discretize_oversample_1D(model, x_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
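# For factor f each unit pixel [i - 0.5, i + 0.5) is sampled at f points
# centered within its sub-pixels, e.g. factor=2 samples pixel 0 at -0.25
# and +0.25; the samples are then averaged below.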
x = np.linspace(
x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] - 0.5 * (1 + 1 / factor),
num=int((x_range[1] - x_range[0]) * factor),
)
values = model(x)
# Reshape and compute mean
values = np.reshape(values, (x.size // factor, factor))
return values.mean(axis=1)
def discretize_oversample_2D(model, x_range, y_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.linspace(
x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] - 0.5 * (1 + 1 / factor),
num=int((x_range[1] - x_range[0]) * factor),
)
y = np.linspace(
y_range[0] - 0.5 * (1 - 1 / factor),
y_range[1] - 0.5 * (1 + 1 / factor),
num=int((y_range[1] - y_range[0]) * factor),
)
x_grid, y_grid = np.meshgrid(x, y)
values = model(x_grid, y_grid)
# Reshape and compute mean
shape = (y.size // factor, factor, x.size // factor, factor)
values = np.reshape(values, shape)
return values.mean(axis=3).mean(axis=1)
def discretize_integrate_1D(model, x_range):
"""
Discretize model by integrating numerically the model over the bin.
"""
from scipy.integrate import quad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values = np.array([])
# Integrate over all bins
for i in range(x.size - 1):
values = np.append(values, quad(model, x[i], x[i + 1])[0])
return values
def discretize_integrate_2D(model, x_range, y_range):
"""
Discretize model by integrating the model over the pixel.
"""
from scipy.integrate import dblquad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
values = np.empty((y.size - 1, x.size - 1))
# Integrate over all pixels
for i in range(x.size - 1):
for j in range(y.size - 1):
values[j, i] = dblquad(
func=lambda y, x: model(x, y),
a=x[i],
b=x[i + 1],
gfun=lambda x: y[j],
hfun=lambda x: y[j + 1],
)[0]
return values
|
b01b23ef161d616dabe7d8567fecc757e87168fd41449710228cfb7548e0aa57 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from functools import partial
import numpy as np
from astropy import units as u
from astropy.modeling.convolution import Convolution
from astropy.modeling.core import SPECIAL_OPERATORS, CompoundModel
from astropy.nddata import support_nddata
from astropy.utils.console import human_file_size
from astropy.utils.exceptions import AstropyUserWarning
from ._convolve import _convolveNd_c
from .core import MAX_NORMALIZATION, Kernel, Kernel1D, Kernel2D
from .utils import KernelSizeError, has_even_axis
# np.unique([scipy.fft.next_fast_len(i, real=True) for i in range(10000)])
# fmt: off
_good_sizes = np.array(
[
0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40,
45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90,
96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162,
180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288,
300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480,
486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720,
729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024,
1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458,
1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025,
2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700,
2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645,
3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800,
4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144,
6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776,
8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000,
]
)
# fmt: on
_good_range = int(np.log10(_good_sizes[-1]))
# Disabling doctests when scipy isn't present.
__doctest_requires__ = {("convolve_fft",): ["scipy.fft"]}
BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"]
def _next_fast_lengths(shape):
"""
Find optimal or good sizes to pad an array of ``shape`` to for better
performance with `numpy.fft.*fft` and `scipy.fft.*fft`.
Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise
looked up from a precomputed list and scaled by powers of 10, if necessary.
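For example (illustrative; these inputs give the same result with or
without scipy available)::
_next_fast_lengths((17, 90))  # -> array([18, 90])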
"""
try:
import scipy.fft
return np.array([scipy.fft.next_fast_len(j) for j in shape])
except ImportError:
pass
newshape = np.empty(len(np.atleast_1d(shape)), dtype=int)
for i, j in enumerate(shape):
scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0)
for n in _good_sizes:
if n * scale >= j:
newshape[i] = n * scale
break
else:
raise ValueError(
f"No next fast length for {j} found in list of _good_sizes "
f"<= {_good_sizes[-1] * scale}."
)
return newshape
def _copy_input_if_needed(
input, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=None
):
# Alias input
input = input.array if isinstance(input, Kernel) else input
# strip quantity attributes
if hasattr(input, "unit"):
input = input.value
output = input
# Copy input
try:
# Anything that's masked must be turned into NaNs for the interpolation.
# This requires copying. A copy is also needed for nan_treatment == 'fill'
# A copy prevents possible function side-effects of the input array.
if nan_treatment == "fill" or np.ma.is_masked(input) or mask is not None:
if np.ma.is_masked(input):
# ``np.ma.maskedarray.filled()`` returns a copy, however there
# is no way to specify the return type or order etc. In addition
# ``np.nan`` is a ``float`` and there is no conversion to an
# ``int`` type. Therefore, a pre-fill copy is needed for non
# ``float`` masked arrays. ``subok=True`` is needed to retain
# ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill
# to act as the copy if type and order are already correct.
output = np.array(
input, dtype=dtype, copy=False, order=order, subok=True
)
output = output.filled(fill_value)
else:
# Since we're making a copy, we might as well use `subok=False` to save,
# what is probably, a negligible amount of memory.
output = np.array(
input, dtype=dtype, copy=True, order=order, subok=False
)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
output[mask != 0] = fill_value
else:
# The call below is synonymous with np.asanyarray(array, ftype=float, order='C')
# The advantage of `subok=True` is that it won't copy when array is an ndarray subclass.
# If it is and `subok=False` (default), then it will copy even if `copy=False`. This
# uses less memory when ndarray subclasses are passed in.
output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
except (TypeError, ValueError) as e:
raise TypeError(
"input should be a Numpy array or something convertible into a float array",
e,
)
return output
@support_nddata(data="array")
def convolve(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
mask=None,
preserve_nan=False,
normalization_zero_tol=1e-8,
):
"""
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
Periodic boundary that wrap to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``.
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is 1e-8.
Returns
-------
result : `numpy.ndarray`
An array with the same dimensions as the input array,
convolved with the kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``float`` (double) precision.
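Examples
--------
A minimal sketch: the ``NaN`` pixel is replaced by a kernel-weighted
average of its valid neighbours (about 3 here)::
import numpy as np
from astropy.convolution import convolve
data = np.array([1., 2., np.nan, 4., 5.])
smoothed = convolve(data, np.array([0.2, 0.6, 0.2]))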
"""
if boundary not in BOUNDARY_OPTIONS:
raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}")
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# OpenMP support is disabled at the C src code level, changing this will have
# no effect.
n_threads = 1
# Keep refs to originals
passed_kernel = kernel
passed_array = array
# The C routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Convert kernel to ndarray if not already
# Copy or alias array to array_internal
array_internal = _copy_input_if_needed(
passed_array,
dtype=float,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
array_dtype = getattr(passed_array, "dtype", array_internal.dtype)
# Copy or alias kernel to kernel_internal
kernel_internal = _copy_input_if_needed(
passed_kernel,
dtype=float,
order="C",
nan_treatment=None,
mask=None,
fill_value=fill_value,
)
# Make sure kernel has all odd axes
if has_even_axis(kernel_internal):
raise KernelSizeError("Kernel size must be odd in all axes.")
# If both image array and kernel are Kernel instances
# constrain convolution method
# This must occur before the main alias/copy of ``passed_kernel`` to
# ``kernel_internal`` as it is used for filling masked kernels.
if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
warnings.warn(
"Both array and kernel are Kernel instances, hardwiring "
"the following parameters: boundary='fill', fill_value=0,"
" normalize_Kernel=True, nan_treatment='interpolate'",
AstropyUserWarning,
)
boundary = "fill"
fill_value = 0
normalize_kernel = True
nan_treatment = "interpolate"
# -----------------------------------------------------------------------
# From this point onwards refer only to ``array_internal`` and
# ``kernel_internal``.
# Assume both are base np.ndarrays and NOT subclasses e.g. NOT
# ``Kernel`` nor ``np.ma.maskedarray`` classes.
# -----------------------------------------------------------------------
# Check dimensionality
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim > 3:
raise NotImplementedError(
"convolve only supports 1, 2, and 3-dimensional arrays at this time"
)
elif array_internal.ndim != kernel_internal.ndim:
raise Exception("array and kernel have differing number of dimensions.")
array_shape = np.array(array_internal.shape)
kernel_shape = np.array(kernel_internal.shape)
pad_width = kernel_shape // 2
# For boundary=None only the center space is convolved. All array indices within a
# distance kernel.shape//2 from the edge are completely ignored (zeroed).
# E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2
# are convolved. It is therefore not possible to use this method to convolve an
# array by a kernel that is larger (see note below) than the array - as ALL pixels
# would be ignored leaving an array of only zeros.
# Note: For even kernels the correctness condition is array_shape > kernel_shape.
# For odd kernels it is:
# array_shape >= kernel_shape OR
# array_shape > kernel_shape-1 OR
# array_shape > 2*(kernel_shape//2).
# Since the latter is equal to the former two for odd lengths, the latter condition is
# complete.
if boundary is None and not np.all(array_shape > 2 * pad_width):
raise KernelSizeError(
"for boundary=None all kernel axes must be smaller than array's - "
"use boundary in ['fill', 'extend', 'wrap'] instead."
)
# NaN interpolation significantly slows down the C convolution
# computation. Since nan_treatment = 'interpolate', is the default
# check whether it is even needed, if not, don't interpolate.
# NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any()
nan_interpolate = (nan_treatment == "interpolate") and np.isnan(
array_internal.sum()
)
# Check if kernel is normalizable
if normalize_kernel or nan_interpolate:
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero:
if nan_interpolate:
raise ValueError(
"Setting nan_treatment='interpolate' "
"requires the kernel to be normalized, "
"but the input kernel has a sum close "
"to zero. For a zero-sum kernel and "
"data with NaNs, set nan_treatment='fill'."
)
else:
raise ValueError(
"The kernel can't be normalized, because "
"its sum is close to zero. The sum of the "
f"given kernel is < {1.0 / MAX_NORMALIZATION}"
)
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan or nan_treatment == "fill":
initially_nan = np.isnan(array_internal)
if nan_treatment == "fill":
array_internal[initially_nan] = fill_value
# Avoid any memory allocation within the C code. Allocate output array
# here and pass through instead.
result = np.zeros(array_internal.shape, dtype=float, order="C")
embed_result_within_padded_region = True
array_to_convolve = array_internal
if boundary in ("fill", "extend", "wrap"):
embed_result_within_padded_region = False
if boundary == "fill":
# This method is faster than using numpy.pad(..., mode='constant')
array_to_convolve = np.full(
array_shape + 2 * pad_width,
fill_value=fill_value,
dtype=float,
order="C",
)
# Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of
# [pad_width[0]:-pad_width[0]]
# to account for when the kernel has size of 1 making pad_width = 0.
if array_internal.ndim == 1:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0]
] = array_internal
elif array_internal.ndim == 2:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
] = array_internal
else:
array_to_convolve[
pad_width[0] : array_shape[0] + pad_width[0],
pad_width[1] : array_shape[1] + pad_width[1],
pad_width[2] : array_shape[2] + pad_width[2],
] = array_internal
else:
np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"}
np_pad_mode = np_pad_mode_dict[boundary]
pad_width = kernel_shape // 2
if array_internal.ndim == 1:
np_pad_width = (pad_width[0],)
elif array_internal.ndim == 2:
np_pad_width = ((pad_width[0],), (pad_width[1],))
else:
np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],))
array_to_convolve = np.pad(
array_internal, pad_width=np_pad_width, mode=np_pad_mode
)
_convolveNd_c(
result,
array_to_convolve,
kernel_internal,
nan_interpolate,
embed_result_within_padded_region,
n_threads,
)
# So far, normalization has only occurred for nan_treatment == 'interpolate'
# because this had to happen within the C extension so as to ignore
# any NaNs
if normalize_kernel:
if not nan_interpolate:
result /= kernel_sum
elif nan_interpolate:
result *= kernel_sum
if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
warnings.warn(
"nan_treatment='interpolate', however, NaN values detected "
"post convolution. A contiguous region of NaN values, larger "
"than the kernel size, are present in the input array. "
"Increase the kernel size to avoid this.",
AstropyUserWarning,
)
if preserve_nan:
result[initially_nan] = np.nan
# Convert result to original data type
array_unit = getattr(passed_array, "unit", None)
if array_unit is not None:
result <<= array_unit
if isinstance(passed_array, Kernel):
if isinstance(passed_array, Kernel1D):
new_result = Kernel1D(array=result)
elif isinstance(passed_array, Kernel2D):
new_result = Kernel2D(array=result)
else:
raise TypeError("Only 1D and 2D Kernels are supported.")
new_result._is_bool = False
new_result._separable = passed_array._separable
if isinstance(passed_kernel, Kernel):
new_result._separable = new_result._separable and passed_kernel._separable
return new_result
elif array_dtype.kind == "f":
# Try to preserve the input type if it's a floating point type
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
@support_nddata(data="array")
def convolve_fft(
array,
kernel,
boundary="fill",
fill_value=0.0,
nan_treatment="interpolate",
normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False,
mask=None,
crop=True,
return_fft=False,
fft_pad=None,
psf_pad=None,
min_wt=0.0,
allow_huge=False,
fftn=np.fft.fftn,
ifftn=np.fft.ifftn,
complex_dtype=complex,
dealias=False,
):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* It optionally pads to the nearest faster sizes to improve FFT speed.
These sizes are optimized for the numpy and scipy implementations, and
``fftconvolve`` uses them by default as well; when using other external
functions (see below), results may vary.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.org/project/pyFFTW/>`_ or
`pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
`numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also
offer somewhat better performance and a multi-threaded option.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' options are not supported for FFT-based
convolution.
fill_value : float, optional
The value to use outside the array when using boundary='fill'.
nan_treatment : {'interpolate', 'fill'}, optional
The method used to handle NaNs in the input ``array``:
* ``'interpolate'``: ``NaN`` values are replaced with
interpolated values using the kernel as an interpolation
function. Note that if the kernel has a sum equal to
zero, NaN interpolation is not possible and will raise an
exception.
* ``'fill'``: ``NaN`` values are replaced by ``fill_value``
prior to convolution.
normalize_kernel : callable or boolean, optional
If specified, this function computes the factor by which the kernel is
divided to normalize it, e.g. ``normalize_kernel=np.sum`` means the kernel
will be modified to be ``kernel = kernel / np.sum(kernel)``. If `True`,
defaults to ``normalize_kernel = np.sum``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different from zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is 1e-8.
preserve_nan : bool, optional
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : None or ndarray, optional
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fft_pad : bool, optional
Default on. Zero-pad image to the nearest size supporting more efficient
execution of the FFT, generally values factorizable into the first 3-5
prime numbers. With ``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
instead of ``NaN`` (which they would be otherwise, because 0/0 = nan).
See the examples below.
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB.
fftn : callable, optional
The fft function. Can be overridden to use your own ffts,
e.g. an fftw3 wrapper or scipy's fftn, ``fftn=scipy.fftpack.fftn``.
ifftn : callable, optional
The inverse fft function. Can be overridden in the same way as ``fftn``.
complex_dtype : complex type, optional
Which complex dtype to use. `numpy` has a range of options, from
``complex64`` to ``complex256`` (availability depends on the platform).
dealias : bool, optional
Default off. Zero-pad image to enable explicit dealiasing
of convolution. With ``boundary='wrap'``, this will be disabled.
Note that for an input of nd dimensions this will increase
the size of the temporary arrays by at least ``1.5**nd``.
This may result in significantly more memory usage.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size.
Raises
------
`ValueError`
If the array is bigger than 1 GB after padding, will raise this
exception unless ``allow_huge`` is True.
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data
can become large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 and the update in
https://github.com/astropy/astropy/pull/11533 for further details.
Dealiasing of pseudospectral convolutions is necessary for
numerical stability of the underlying algorithms. A common
method for handling this is to zero pad the image by at least
1/2 to eliminate the wavenumbers which have been aliased
by convolution. This is so that the aliased 1/3 of the
results of the convolution computation can be thrown out. See
https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2
https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037
Note that if dealiasing is necessary to your application, but your
process is memory constrained, you may want to consider using
FFTW++: https://github.com/dealias/fftwpp. It includes python
wrappers for a pseudospectral convolution which will implicitly
dealias your convolution without the need for additional padding.
Note that one cannot use FFTW++'s convolution directly in this
method, as it handles the entire convolution process internally.
Additionally, FFTW++ includes other useful pseudospectral methods to
consider.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([0.33333333, 1.33333333, 1. ])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([0.5, 2. , 1.5])
>>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP
array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00])
>>> convolve_fft([1, 2, 3], [1])
array([1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
array([1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([0.5, 2. , 1.5])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([0.5, 2. , 1.5])
>>> import scipy.fft # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn)
array([0.5, 2. , 1.5])
>>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores
>>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1)
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp)
array([0.5, 2. , 1.5])
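A mask can be supplied instead of (or in addition to) NaNs; masked pixels
are treated as NaN, giving the same result as the NaN example above
(illustrative sketch):
>>> convolve_fft([1, 5, 3], [1, 1, 1], mask=[0, 1, 0],
...              nan_treatment='interpolate')
array([0.5, 2. , 1.5])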
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError(
"Can't convolve two kernels with convolve_fft. Use convolve instead."
)
if nan_treatment not in ("interpolate", "fill"):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Get array quantity if it exists
array_unit = getattr(array, "unit", None)
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = _copy_input_if_needed(
array,
dtype=complex,
order="C",
nan_treatment=nan_treatment,
mask=mask,
fill_value=np.nan,
)
kernel = _copy_input_if_needed(
kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0
)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (
np.prod(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
) * u.byte
if array_size_B > 1 * u.GB and not allow_huge:
raise ValueError(
f"Size Error: Arrays will be {human_file_size(array_size_B)}. "
"Use allow_huge=True to override this exception."
)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
if nan_treatment == "fill":
array[nanmaskarray] = fill_value
else:
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
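# With nan_treatment='interpolate' the zeroed pixels are compensated for
# later through a weight map built from ``nanmaskarray``; with 'fill' they
# simply contribute ``fill_value`` to the convolution.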
if normalize_kernel is True:
if kernel.sum() < 1.0 / MAX_NORMALIZATION:
raise Exception(
"The kernel can't be normalized, because its sum is close to zero. The"
f" sum of the given kernel is < {1.0 / MAX_NORMALIZATION}"
)
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# Assume normalize_kernel is a callable; if it is not, the call below will
# raise. Explicit type checking is deliberately avoided (duck typing).
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == "interpolate":
raise ValueError(
"Cannot interpolate NaNs with an unnormalizable kernel"
)
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn(
"The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary",
AstropyUserWarning,
)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == "fill":
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn(
f"psf_pad was set to {psf_pad}, which overrides the "
"boundary='fill' setting.",
AstropyUserWarning,
)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == "wrap":
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
if dealias:
raise ValueError("With boundary='wrap', dealias cannot be enabled.")
fill_value = 0 # force zero; it should not be used
elif boundary == "extend":
raise NotImplementedError(
"The 'extend' option is not implemented for fft-based convolution"
)
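# At this point the boundary-specific defaults have been resolved:
# boundary=None and 'fill' default psf_pad and fft_pad to True unless they
# were set explicitly, while 'wrap' forces both to False.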
# Add shapes elementwise for psf_pad.
if psf_pad:  # resolved above: True by default for boundary=None or 'fill'
# add the sizes along each dimension (bigger)
newshape = np.array(arrayshape) + np.array(kernshape)
else:
# take the larger shape in each dimension (smaller)
newshape = np.maximum(arrayshape, kernshape)
if dealias:
# Extend shape by 1/2 for dealiasing
newshape += np.ceil(newshape / 2).astype(int)
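# e.g. a 128-pixel axis grows to 192 samples, so the aliased upper third
# of the spectrum can be discarded after the convolution (2/3 rule).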
# Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5).
if fft_pad: # default=True
# Get optimized sizes from scipy.
newshape = _next_fast_lengths(newshape)
# perform a second check after padding
array_size_C = (
np.prod(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize
) * u.byte
if array_size_C > 1 * u.GB and not allow_huge:
raise ValueError(
f"Size Error: Arrays will be {human_file_size(array_size_C)}. "
"Use allow_huge=True to override this exception."
)
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.prod(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.prod(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.prod(arrayshape)*np.dtype(bool).itemsize
# + np.prod(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
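# e.g. for newdimsize=8 and arraydimsize=5: center=4 and the array slice
# is slice(2, 7), i.e. the central 5 samples of the padded axis.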
arrayslices = []
kernslices = []
for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [
slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2)
]
kernslices += [
slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2)
]
arrayslices = tuple(arrayslices)
kernslices = tuple(kernslices)
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = nan_treatment == "interpolate"
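# For NaN interpolation, convolve a weight map (1 on valid pixels, 0 on
# NaN pixels) with the same kernel; the result is each output pixel's
# effective weight, which is used below to renormalize the convolution.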
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
fftmult *= kernel_scale
if array_unit is not None:
fftmult <<= array_unit
if return_fft:
return fftmult
if interpolate_nan:
with np.errstate(divide="ignore", invalid="ignore"):
# divisions by zero are expected here; if the weight is zero, we want
# the output to be nan or inf
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.0:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts
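Examples
--------
A minimal, illustrative example using the direct (non-FFT) ``convolve``:
>>> interpolate_replace_nans(np.array([1., np.nan, 3.]),
...                          np.array([1., 1., 1.]))  # doctest: +FLOAT_CMP
array([1., 2., 3.])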
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(
array,
kernel,
nan_treatment="interpolate",
normalize_kernel=True,
preserve_nan=False,
**kwargs,
)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft` or
`~astropy.convolution.convolve`, depending on ``mode``.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
**kwargs : dict
Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
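Examples
--------
An illustrative sketch convolving two 1D Gaussians (the evaluation is
skipped here since its numerical output depends on the sampling grid):
>>> from astropy.modeling.models import Gaussian1D
>>> g1 = Gaussian1D(amplitude=1, mean=-1, stddev=1)
>>> g2 = Gaussian1D(amplitude=1, mean=1, stddev=1)
>>> conv_model = convolve_models(g1, g2, mode='convolve_fft')
>>> x = np.linspace(-5, 5, 100)
>>> y = conv_model(x)  # doctest: +SKIP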
"""
if mode == "convolve_fft":
operator = SPECIAL_OPERATORS.add(
"convolve_fft", partial(convolve_fft, **kwargs)
)
elif mode == "convolve":
operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs))
else:
raise ValueError(f"Mode {mode} is not supported.")
return CompoundModel(operator, model, kernel)
def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
bounding_box : tuple
The bounding box which encompasses enough of the support of both
the ``model`` and ``kernel`` so that an accurate convolution can be
computed.
resolution : float
The resolution that one wishes to approximate the convolution
integral at.
cache : bool, optional
Default value True. Allow for the storage of the convolution
computation for later reuse.
**kwargs : dict
Keyword arguments to be passed to `~astropy.convolution.convolve_fft`.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
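Examples
--------
An illustrative sketch (evaluation skipped, since the numerical result
depends on the chosen bounding box and resolution):
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(amplitude=1, mean=0, stddev=1)
>>> kernel = Gaussian1D(amplitude=1, mean=0, stddev=0.5)
>>> conv = convolve_models_fft(model, kernel, bounding_box=(-5, 5),
...                            resolution=0.01)
>>> conv(0.0)  # doctest: +SKIP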
"""
operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs))
return Convolution(operator, model, kernel, bounding_box, resolution, cache)